FFmpeg
hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
31 #include "libavutil/display.h"
32 #include "libavutil/film_grain_params.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mastering_display_metadata.h"
35 #include "libavutil/md5.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/stereo3d.h"
39 #include "libavutil/timecode.h"
40 
41 #include "bswapdsp.h"
42 #include "bytestream.h"
43 #include "cabac_functions.h"
44 #include "codec_internal.h"
45 #include "golomb.h"
46 #include "hevc.h"
47 #include "hevc_data.h"
48 #include "hevc_parse.h"
49 #include "hevcdec.h"
50 #include "hwconfig.h"
51 #include "internal.h"
52 #include "profiles.h"
53 #include "thread.h"
54 #include "threadframe.h"
55 
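/* Maps a prediction block width to the index used to select the matching
 * put_hevc_qpel/put_hevc_epel function pointer in HEVCDSPContext. */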
56 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
57 
58 /**
59  * NOTE: Each function hls_foo corresponds to the function foo in the
60  * specification (HLS stands for High Level Syntax).
61  */
62 
63 /**
64  * Section 5.7
65  */
66 
67 /* free everything allocated by pic_arrays_init() */
68 static void pic_arrays_free(HEVCContext *s)
69 {
70  av_freep(&s->sao);
71  av_freep(&s->deblock);
72 
73  av_freep(&s->skip_flag);
74  av_freep(&s->tab_ct_depth);
75 
76  av_freep(&s->tab_ipm);
77  av_freep(&s->cbf_luma);
78  av_freep(&s->is_pcm);
79 
80  av_freep(&s->qp_y_tab);
81  av_freep(&s->tab_slice_address);
82  av_freep(&s->filter_slice_edges);
83 
84  av_freep(&s->horizontal_bs);
85  av_freep(&s->vertical_bs);
86 
87  av_freep(&s->sh.entry_point_offset);
88  av_freep(&s->sh.size);
89  av_freep(&s->sh.offset);
90 
91  av_buffer_pool_uninit(&s->tab_mvf_pool);
92  av_buffer_pool_uninit(&s->rpl_tab_pool);
93 }
94 
95 /* allocate arrays that depend on frame dimensions */
96 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
97 {
98  int log2_min_cb_size = sps->log2_min_cb_size;
99  int width = sps->width;
100  int height = sps->height;
101  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
102  ((height >> log2_min_cb_size) + 1);
103  int ctb_count = sps->ctb_width * sps->ctb_height;
104  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
105 
106  s->bs_width = (width >> 2) + 1;
107  s->bs_height = (height >> 2) + 1;
108 
109  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
110  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
111  if (!s->sao || !s->deblock)
112  goto fail;
113 
114  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
115  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
116  if (!s->skip_flag || !s->tab_ct_depth)
117  goto fail;
118 
119  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
120  s->tab_ipm = av_mallocz(min_pu_size);
121  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
122  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
123  goto fail;
124 
125  s->filter_slice_edges = av_mallocz(ctb_count);
126  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
127  sizeof(*s->tab_slice_address));
128  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
129  sizeof(*s->qp_y_tab));
130  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
131  goto fail;
132 
133  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
134  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
135  if (!s->horizontal_bs || !s->vertical_bs)
136  goto fail;
137 
138  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
139  av_buffer_allocz);
140  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
141  av_buffer_allocz);
142  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
143  goto fail;
144 
145  return 0;
146 
147 fail:
148  pic_arrays_free(s);
149  return AVERROR(ENOMEM);
150 }
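/* Parse the pred_weight_table() syntax from the slice header: for each active
 * reference in L0 (and L1 for B slices) an explicit luma/chroma weight is
 * either coded as a delta against the default (1 << log2_weight_denom) or
 * inferred as that default, and the chroma offsets are reconstructed and
 * clipped to [-128, 127]. */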
151 
152 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
153 {
154  int i = 0;
155  int j = 0;
156  uint8_t luma_weight_l0_flag[16];
157  uint8_t chroma_weight_l0_flag[16];
158  uint8_t luma_weight_l1_flag[16];
159  uint8_t chroma_weight_l1_flag[16];
160  int luma_log2_weight_denom;
161 
162  luma_log2_weight_denom = get_ue_golomb_long(gb);
163  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
168  if (s->ps.sps->chroma_format_idc != 0) {
169  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
170  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
171  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
172  return AVERROR_INVALIDDATA;
173  }
174  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
175  }
176 
177  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
178  luma_weight_l0_flag[i] = get_bits1(gb);
179  if (!luma_weight_l0_flag[i]) {
180  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
181  s->sh.luma_offset_l0[i] = 0;
182  }
183  }
184  if (s->ps.sps->chroma_format_idc != 0) {
185  for (i = 0; i < s->sh.nb_refs[L0]; i++)
186  chroma_weight_l0_flag[i] = get_bits1(gb);
187  } else {
188  for (i = 0; i < s->sh.nb_refs[L0]; i++)
189  chroma_weight_l0_flag[i] = 0;
190  }
191  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
192  if (luma_weight_l0_flag[i]) {
193  int delta_luma_weight_l0 = get_se_golomb(gb);
194  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
195  return AVERROR_INVALIDDATA;
196  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
197  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
198  }
199  if (chroma_weight_l0_flag[i]) {
200  for (j = 0; j < 2; j++) {
201  int delta_chroma_weight_l0 = get_se_golomb(gb);
202  int delta_chroma_offset_l0 = get_se_golomb(gb);
203 
204  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
205  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
206  return AVERROR_INVALIDDATA;
207  }
208 
209  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
210  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
211  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
212  }
213  } else {
214  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
215  s->sh.chroma_offset_l0[i][0] = 0;
216  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
217  s->sh.chroma_offset_l0[i][1] = 0;
218  }
219  }
220  if (s->sh.slice_type == HEVC_SLICE_B) {
221  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
222  luma_weight_l1_flag[i] = get_bits1(gb);
223  if (!luma_weight_l1_flag[i]) {
224  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
225  s->sh.luma_offset_l1[i] = 0;
226  }
227  }
228  if (s->ps.sps->chroma_format_idc != 0) {
229  for (i = 0; i < s->sh.nb_refs[L1]; i++)
230  chroma_weight_l1_flag[i] = get_bits1(gb);
231  } else {
232  for (i = 0; i < s->sh.nb_refs[L1]; i++)
233  chroma_weight_l1_flag[i] = 0;
234  }
235  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
236  if (luma_weight_l1_flag[i]) {
237  int delta_luma_weight_l1 = get_se_golomb(gb);
238  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
239  return AVERROR_INVALIDDATA;
240  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
241  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
242  }
243  if (chroma_weight_l1_flag[i]) {
244  for (j = 0; j < 2; j++) {
245  int delta_chroma_weight_l1 = get_se_golomb(gb);
246  int delta_chroma_offset_l1 = get_se_golomb(gb);
247 
248  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
249  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
250  return AVERROR_INVALIDDATA;
251  }
252 
253  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
254  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
255  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
256  }
257  } else {
258  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
259  s->sh.chroma_offset_l1[i][0] = 0;
260  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
261  s->sh.chroma_offset_l1[i][1] = 0;
262  }
263  }
264  }
265  return 0;
266 }
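/* Parse the long-term reference picture set: entries either reference the
 * candidates signalled in the SPS or are coded explicitly as POC LSBs in the
 * slice header; when poc_msb_present is set, the full POC is rebuilt from the
 * accumulated MSB-cycle deltas relative to the current picture. */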
267 
268 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
269 {
270  const HEVCSPS *sps = s->ps.sps;
271  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
272  int prev_delta_msb = 0;
273  unsigned int nb_sps = 0, nb_sh;
274  int i;
275 
276  rps->nb_refs = 0;
277  if (!sps->long_term_ref_pics_present_flag)
278  return 0;
279 
280  if (sps->num_long_term_ref_pics_sps > 0)
281  nb_sps = get_ue_golomb_long(gb);
282  nb_sh = get_ue_golomb_long(gb);
283 
284  if (nb_sps > sps->num_long_term_ref_pics_sps)
285  return AVERROR_INVALIDDATA;
286  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
287  return AVERROR_INVALIDDATA;
288 
289  rps->nb_refs = nb_sh + nb_sps;
290 
291  for (i = 0; i < rps->nb_refs; i++) {
292 
293  if (i < nb_sps) {
294  uint8_t lt_idx_sps = 0;
295 
296  if (sps->num_long_term_ref_pics_sps > 1)
297  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
298 
299  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
300  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
301  } else {
302  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
303  rps->used[i] = get_bits1(gb);
304  }
305 
306  rps->poc_msb_present[i] = get_bits1(gb);
307  if (rps->poc_msb_present[i]) {
308  int64_t delta = get_ue_golomb_long(gb);
309  int64_t poc;
310 
311  if (i && i != nb_sps)
312  delta += prev_delta_msb;
313 
314  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
315  if (poc != (int32_t)poc)
316  return AVERROR_INVALIDDATA;
317  rps->poc[i] = poc;
318  prev_delta_msb = delta;
319  }
320  }
321 
322  return 0;
323 }
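/* Propagate the coded stream parameters (dimensions, cropping window,
 * profile/level, VUI colour description and timing info) from the active
 * SPS/VPS into the AVCodecContext seen by the caller. */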
324 
325 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
326 {
327  AVCodecContext *avctx = s->avctx;
328  const HEVCParamSets *ps = &s->ps;
329  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
330  const HEVCWindow *ow = &sps->output_window;
331  unsigned int num = 0, den = 0;
332 
333  avctx->pix_fmt = sps->pix_fmt;
334  avctx->coded_width = sps->width;
335  avctx->coded_height = sps->height;
336  avctx->width = sps->width - ow->left_offset - ow->right_offset;
337  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
338  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
339  avctx->profile = sps->ptl.general_ptl.profile_idc;
340  avctx->level = sps->ptl.general_ptl.level_idc;
341 
342  ff_set_sar(avctx, sps->vui.sar);
343 
344  if (sps->vui.video_signal_type_present_flag)
345  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
346  : AVCOL_RANGE_MPEG;
347  else
348  avctx->color_range = AVCOL_RANGE_MPEG;
349 
350  if (sps->vui.colour_description_present_flag) {
351  avctx->color_primaries = sps->vui.colour_primaries;
352  avctx->color_trc = sps->vui.transfer_characteristic;
353  avctx->colorspace = sps->vui.matrix_coeffs;
354  } else {
355  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
356  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
357  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
358  }
359 
360  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
361  if (sps->chroma_format_idc == 1) {
362  if (sps->vui.chroma_loc_info_present_flag) {
363  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
364  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
365  } else
366  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
367  }
368 
369  if (vps->vps_timing_info_present_flag) {
370  num = vps->vps_num_units_in_tick;
371  den = vps->vps_time_scale;
372  } else if (sps->vui.vui_timing_info_present_flag) {
373  num = sps->vui.vui_num_units_in_tick;
374  den = sps->vui.vui_time_scale;
375  }
376 
377  if (num != 0 && den != 0)
378  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
379  num, den, 1 << 30);
380 }
381 
382 static int export_stream_params_from_sei(HEVCContext *s)
383 {
384  AVCodecContext *avctx = s->avctx;
385 
386  if (s->sei.a53_caption.buf_ref)
387  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
388 
389  if (s->sei.alternative_transfer.present &&
390  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
391  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
392  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
393  }
394 
395  if (s->sei.film_grain_characteristics.present)
396  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
397 
398  return 0;
399 }
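/* Build the list of candidate output pixel formats, hardware-accelerated
 * formats first and the native software format last, terminated by
 * AV_PIX_FMT_NONE, then let ff_thread_get_format() pick one. */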
400 
401 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
402 {
403 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
404  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
405  CONFIG_HEVC_NVDEC_HWACCEL + \
406  CONFIG_HEVC_VAAPI_HWACCEL + \
407  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
408  CONFIG_HEVC_VDPAU_HWACCEL)
409  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
410 
411  switch (sps->pix_fmt) {
412  case AV_PIX_FMT_YUV420P:
413  case AV_PIX_FMT_YUVJ420P:
414 #if CONFIG_HEVC_DXVA2_HWACCEL
415  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
416 #endif
417 #if CONFIG_HEVC_D3D11VA_HWACCEL
418  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
419  *fmt++ = AV_PIX_FMT_D3D11;
420 #endif
421 #if CONFIG_HEVC_VAAPI_HWACCEL
422  *fmt++ = AV_PIX_FMT_VAAPI;
423 #endif
424 #if CONFIG_HEVC_VDPAU_HWACCEL
425  *fmt++ = AV_PIX_FMT_VDPAU;
426 #endif
427 #if CONFIG_HEVC_NVDEC_HWACCEL
428  *fmt++ = AV_PIX_FMT_CUDA;
429 #endif
430 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
431  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
432 #endif
433  break;
434  case AV_PIX_FMT_YUV420P10:
435 #if CONFIG_HEVC_DXVA2_HWACCEL
436  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
437 #endif
438 #if CONFIG_HEVC_D3D11VA_HWACCEL
439  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
440  *fmt++ = AV_PIX_FMT_D3D11;
441 #endif
442 #if CONFIG_HEVC_VAAPI_HWACCEL
443  *fmt++ = AV_PIX_FMT_VAAPI;
444 #endif
445 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
446  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
447 #endif
448 #if CONFIG_HEVC_VDPAU_HWACCEL
449  *fmt++ = AV_PIX_FMT_VDPAU;
450 #endif
451 #if CONFIG_HEVC_NVDEC_HWACCEL
452  *fmt++ = AV_PIX_FMT_CUDA;
453 #endif
454  break;
455  case AV_PIX_FMT_YUV444P:
456 #if CONFIG_HEVC_VAAPI_HWACCEL
457  *fmt++ = AV_PIX_FMT_VAAPI;
458 #endif
459 #if CONFIG_HEVC_VDPAU_HWACCEL
460  *fmt++ = AV_PIX_FMT_VDPAU;
461 #endif
462 #if CONFIG_HEVC_NVDEC_HWACCEL
463  *fmt++ = AV_PIX_FMT_CUDA;
464 #endif
465 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
466  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
467 #endif
468  break;
469  case AV_PIX_FMT_YUV422P:
470  case AV_PIX_FMT_YUV422P10LE:
471 #if CONFIG_HEVC_VAAPI_HWACCEL
472  *fmt++ = AV_PIX_FMT_VAAPI;
473 #endif
474 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
475  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
476 #endif
477  break;
478  case AV_PIX_FMT_YUV444P10:
479 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
480  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
481 #endif
482  case AV_PIX_FMT_YUV420P12:
483  case AV_PIX_FMT_YUV444P12:
484 #if CONFIG_HEVC_VDPAU_HWACCEL
485  *fmt++ = AV_PIX_FMT_VDPAU;
486 #endif
487 #if CONFIG_HEVC_NVDEC_HWACCEL
488  *fmt++ = AV_PIX_FMT_CUDA;
489 #endif
490  break;
491  }
492 
493  *fmt++ = sps->pix_fmt;
494  *fmt = AV_PIX_FMT_NONE;
495 
496  return ff_thread_get_format(s->avctx, pix_fmts);
497 }
498 
499 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
500  enum AVPixelFormat pix_fmt)
501 {
502  int ret, i;
503 
504  pic_arrays_free(s);
505  s->ps.sps = NULL;
506  s->ps.vps = NULL;
507 
508  if (!sps)
509  return 0;
510 
511  ret = pic_arrays_init(s, sps);
512  if (ret < 0)
513  goto fail;
514 
515  export_stream_params(s, sps);
516 
517  s->avctx->pix_fmt = pix_fmt;
518 
519  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
520  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
521  ff_videodsp_init (&s->vdsp, sps->bit_depth);
522 
523  for (i = 0; i < 3; i++) {
524  av_freep(&s->sao_pixel_buffer_h[i]);
525  av_freep(&s->sao_pixel_buffer_v[i]);
526  }
527 
528  if (sps->sao_enabled && !s->avctx->hwaccel) {
529  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
530  int c_idx;
531 
532  for(c_idx = 0; c_idx < c_count; c_idx++) {
533  int w = sps->width >> sps->hshift[c_idx];
534  int h = sps->height >> sps->vshift[c_idx];
535  s->sao_pixel_buffer_h[c_idx] =
536  av_malloc((w * 2 * sps->ctb_height) <<
537  sps->pixel_shift);
538  s->sao_pixel_buffer_v[c_idx] =
539  av_malloc((h * 2 * sps->ctb_width) <<
540  sps->pixel_shift);
541  if (!s->sao_pixel_buffer_h[c_idx] ||
542  !s->sao_pixel_buffer_v[c_idx])
543  goto fail;
544  }
545  }
546 
547  s->ps.sps = sps;
548  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
549 
550  return 0;
551 
552 fail:
553  pic_arrays_free(s);
554  for (i = 0; i < 3; i++) {
555  av_freep(&s->sao_pixel_buffer_h[i]);
556  av_freep(&s->sao_pixel_buffer_v[i]);
557  }
558  s->ps.sps = NULL;
559  return ret;
560 }
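/* Parse the slice segment header: activate the referenced PPS/SPS, derive the
 * POC and reference picture sets, and read the SAO, deblocking, weighted
 * prediction and entry-point-offset controls plus the initial slice QP. */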
561 
562 static int hls_slice_header(HEVCContext *s)
563 {
564  GetBitContext *gb = &s->HEVClc->gb;
565  SliceHeader *sh = &s->sh;
566  int i, ret;
567 
568  // Coded parameters
569  sh->first_slice_in_pic_flag = get_bits1(gb);
570  if (s->ref && sh->first_slice_in_pic_flag) {
571  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
572  return 1; // This slice will be skipped later, do not corrupt state
573  }
574 
575  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
576  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
577  s->max_ra = INT_MAX;
578  if (IS_IDR(s))
579  ff_hevc_clear_refs(s);
580  }
581  sh->no_output_of_prior_pics_flag = 0;
582  if (IS_IRAP(s))
583  sh->no_output_of_prior_pics_flag = get_bits1(gb);
584 
585  sh->pps_id = get_ue_golomb_long(gb);
586  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
587  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
588  return AVERROR_INVALIDDATA;
589  }
590  if (!sh->first_slice_in_pic_flag &&
591  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
592  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
593  return AVERROR_INVALIDDATA;
594  }
595  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
596  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
597  sh->no_output_of_prior_pics_flag = 1;
598 
599  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
600  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
601  enum AVPixelFormat pix_fmt;
602 
603  ff_hevc_clear_refs(s);
604 
605  ret = set_sps(s, sps, sps->pix_fmt);
606  if (ret < 0)
607  return ret;
608 
609  pix_fmt = get_format(s, sps);
610  if (pix_fmt < 0)
611  return pix_fmt;
612  s->avctx->pix_fmt = pix_fmt;
613 
614  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
615  s->max_ra = INT_MAX;
616  }
617 
618  ret = export_stream_params_from_sei(s);
619  if (ret < 0)
620  return ret;
621 
622  sh->dependent_slice_segment_flag = 0;
623  if (!sh->first_slice_in_pic_flag) {
624  int slice_address_length;
625 
626  if (s->ps.pps->dependent_slice_segments_enabled_flag)
627  sh->dependent_slice_segment_flag = get_bits1(gb);
628 
629  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
630  s->ps.sps->ctb_height);
631  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
632  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
633  av_log(s->avctx, AV_LOG_ERROR,
634  "Invalid slice segment address: %u.\n",
635  sh->slice_segment_addr);
636  return AVERROR_INVALIDDATA;
637  }
638 
639  if (!sh->dependent_slice_segment_flag) {
640  sh->slice_addr = sh->slice_segment_addr;
641  s->slice_idx++;
642  }
643  } else {
644  sh->slice_segment_addr = sh->slice_addr = 0;
645  s->slice_idx = 0;
646  s->slice_initialized = 0;
647  }
648 
649  if (!sh->dependent_slice_segment_flag) {
650  s->slice_initialized = 0;
651 
652  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
653  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
654 
655  sh->slice_type = get_ue_golomb_long(gb);
656  if (!(sh->slice_type == HEVC_SLICE_I ||
657  sh->slice_type == HEVC_SLICE_P ||
658  sh->slice_type == HEVC_SLICE_B)) {
659  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
660  sh->slice_type);
661  return AVERROR_INVALIDDATA;
662  }
663  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
664  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
665  return AVERROR_INVALIDDATA;
666  }
667 
668  // when flag is not present, picture is inferred to be output
669  sh->pic_output_flag = 1;
670  if (s->ps.pps->output_flag_present_flag)
671  sh->pic_output_flag = get_bits1(gb);
672 
673  if (s->ps.sps->separate_colour_plane_flag)
674  sh->colour_plane_id = get_bits(gb, 2);
675 
676  if (!IS_IDR(s)) {
677  int poc, pos;
678 
679  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
680  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
681  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
682  av_log(s->avctx, AV_LOG_WARNING,
683  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
684  if (s->avctx->err_recognition & AV_EF_EXPLODE)
685  return AVERROR_INVALIDDATA;
686  poc = s->poc;
687  }
688  s->poc = poc;
689 
690  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
691  pos = get_bits_left(gb);
692  if (!sh->short_term_ref_pic_set_sps_flag) {
693  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
694  if (ret < 0)
695  return ret;
696 
697  sh->short_term_rps = &sh->slice_rps;
698  } else {
699  int numbits, rps_idx;
700 
701  if (!s->ps.sps->nb_st_rps) {
702  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
703  return AVERROR_INVALIDDATA;
704  }
705 
706  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
707  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
708  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
709  }
710  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
711 
712  pos = get_bits_left(gb);
713  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
714  if (ret < 0) {
715  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
716  if (s->avctx->err_recognition & AV_EF_EXPLODE)
717  return AVERROR_INVALIDDATA;
718  }
719  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
720 
721  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
722  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
723  else
724  sh->slice_temporal_mvp_enabled_flag = 0;
725  } else {
726  s->sh.short_term_rps = NULL;
727  s->poc = 0;
728  }
729 
730  /* 8.3.1 */
731  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
732  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
733  s->nal_unit_type != HEVC_NAL_TSA_N &&
734  s->nal_unit_type != HEVC_NAL_STSA_N &&
735  s->nal_unit_type != HEVC_NAL_RADL_N &&
736  s->nal_unit_type != HEVC_NAL_RADL_R &&
737  s->nal_unit_type != HEVC_NAL_RASL_N &&
738  s->nal_unit_type != HEVC_NAL_RASL_R)
739  s->pocTid0 = s->poc;
740 
741  if (s->ps.sps->sao_enabled) {
742  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
743  if (s->ps.sps->chroma_format_idc) {
744  sh->slice_sample_adaptive_offset_flag[1] =
745  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
746  }
747  } else {
748  sh->slice_sample_adaptive_offset_flag[0] = 0;
749  sh->slice_sample_adaptive_offset_flag[1] = 0;
750  sh->slice_sample_adaptive_offset_flag[2] = 0;
751  }
752 
753  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
754  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
755  int nb_refs;
756 
757  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
758  if (sh->slice_type == HEVC_SLICE_B)
759  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
760 
761  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
762  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
763  if (sh->slice_type == HEVC_SLICE_B)
764  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
765  }
766  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
767  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
768  sh->nb_refs[L0], sh->nb_refs[L1]);
769  return AVERROR_INVALIDDATA;
770  }
771 
772  sh->rpl_modification_flag[0] = 0;
773  sh->rpl_modification_flag[1] = 0;
774  nb_refs = ff_hevc_frame_nb_refs(s);
775  if (!nb_refs) {
776  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
777  return AVERROR_INVALIDDATA;
778  }
779 
780  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
781  sh->rpl_modification_flag[0] = get_bits1(gb);
782  if (sh->rpl_modification_flag[0]) {
783  for (i = 0; i < sh->nb_refs[L0]; i++)
784  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
785  }
786 
787  if (sh->slice_type == HEVC_SLICE_B) {
788  sh->rpl_modification_flag[1] = get_bits1(gb);
789  if (sh->rpl_modification_flag[1] == 1)
790  for (i = 0; i < sh->nb_refs[L1]; i++)
791  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
792  }
793  }
794 
795  if (sh->slice_type == HEVC_SLICE_B)
796  sh->mvd_l1_zero_flag = get_bits1(gb);
797 
798  if (s->ps.pps->cabac_init_present_flag)
799  sh->cabac_init_flag = get_bits1(gb);
800  else
801  sh->cabac_init_flag = 0;
802 
803  sh->collocated_ref_idx = 0;
804  if (sh->slice_temporal_mvp_enabled_flag) {
805  sh->collocated_list = L0;
806  if (sh->slice_type == HEVC_SLICE_B)
807  sh->collocated_list = !get_bits1(gb);
808 
809  if (sh->nb_refs[sh->collocated_list] > 1) {
810  sh->collocated_ref_idx = get_ue_golomb_long(gb);
811  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
812  av_log(s->avctx, AV_LOG_ERROR,
813  "Invalid collocated_ref_idx: %d.\n",
814  sh->collocated_ref_idx);
815  return AVERROR_INVALIDDATA;
816  }
817  }
818  }
819 
820  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
821  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
822  int ret = pred_weight_table(s, gb);
823  if (ret < 0)
824  return ret;
825  }
826 
827  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
828  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
829  av_log(s->avctx, AV_LOG_ERROR,
830  "Invalid number of merging MVP candidates: %d.\n",
831  sh->max_num_merge_cand);
832  return AVERROR_INVALIDDATA;
833  }
834  }
835 
836  sh->slice_qp_delta = get_se_golomb(gb);
837 
838  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
839  sh->slice_cb_qp_offset = get_se_golomb(gb);
840  sh->slice_cr_qp_offset = get_se_golomb(gb);
841  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
842  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
843  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
844  return AVERROR_INVALIDDATA;
845  }
846  } else {
847  sh->slice_cb_qp_offset = 0;
848  sh->slice_cr_qp_offset = 0;
849  }
850 
851  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
852  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
853  else
854  sh->cu_chroma_qp_offset_enabled_flag = 0;
855 
856  if (s->ps.pps->deblocking_filter_control_present_flag) {
857  int deblocking_filter_override_flag = 0;
858 
859  if (s->ps.pps->deblocking_filter_override_enabled_flag)
860  deblocking_filter_override_flag = get_bits1(gb);
861 
862  if (deblocking_filter_override_flag) {
863  sh->disable_deblocking_filter_flag = get_bits1(gb);
864  if (!sh->disable_deblocking_filter_flag) {
865  int beta_offset_div2 = get_se_golomb(gb);
866  int tc_offset_div2 = get_se_golomb(gb) ;
867  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
868  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
869  av_log(s->avctx, AV_LOG_ERROR,
870  "Invalid deblock filter offsets: %d, %d\n",
871  beta_offset_div2, tc_offset_div2);
872  return AVERROR_INVALIDDATA;
873  }
874  sh->beta_offset = beta_offset_div2 * 2;
875  sh->tc_offset = tc_offset_div2 * 2;
876  }
877  } else {
878  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
879  sh->beta_offset = s->ps.pps->beta_offset;
880  sh->tc_offset = s->ps.pps->tc_offset;
881  }
882  } else {
883  sh->disable_deblocking_filter_flag = 0;
884  sh->beta_offset = 0;
885  sh->tc_offset = 0;
886  }
887 
888  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
889  (sh->slice_sample_adaptive_offset_flag[0] ||
890  sh->slice_sample_adaptive_offset_flag[1] ||
891  !sh->disable_deblocking_filter_flag)) {
892  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
893  } else {
894  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
895  }
896  } else if (!s->slice_initialized) {
897  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
898  return AVERROR_INVALIDDATA;
899  }
900 
901  sh->num_entry_point_offsets = 0;
902  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
903  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
904  // It would be possible to bound this tighter, but this is simpler
905  if (num_entry_point_offsets > get_bits_left(gb)) {
906  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
907  return AVERROR_INVALIDDATA;
908  }
909 
910  sh->num_entry_point_offsets = num_entry_point_offsets;
911  if (sh->num_entry_point_offsets > 0) {
912  int offset_len = get_ue_golomb_long(gb) + 1;
913 
914  if (offset_len < 1 || offset_len > 32) {
915  sh->num_entry_point_offsets = 0;
916  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
917  return AVERROR_INVALIDDATA;
918  }
919 
920  av_freep(&sh->entry_point_offset);
921  av_freep(&sh->offset);
922  av_freep(&sh->size);
923  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
924  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
925  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
926  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
927  sh->num_entry_point_offsets = 0;
928  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
929  return AVERROR(ENOMEM);
930  }
931  for (i = 0; i < sh->num_entry_point_offsets; i++) {
932  unsigned val = get_bits_long(gb, offset_len);
933  sh->entry_point_offset[i] = val + 1; // +1 to get the size
934  }
935  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
936  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
937  s->threads_number = 1;
938  } else
939  s->enable_parallel_tiles = 0;
940  } else
941  s->enable_parallel_tiles = 0;
942  }
943 
944  if (s->ps.pps->slice_header_extension_present_flag) {
945  unsigned int length = get_ue_golomb_long(gb);
946  if (length*8LL > get_bits_left(gb)) {
947  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
948  return AVERROR_INVALIDDATA;
949  }
950  for (i = 0; i < length; i++)
951  skip_bits(gb, 8); // slice_header_extension_data_byte
952  }
953 
954  // Inferred parameters
955  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
956  if (sh->slice_qp > 51 ||
957  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
958  av_log(s->avctx, AV_LOG_ERROR,
959  "The slice_qp %d is outside the valid range "
960  "[%d, 51].\n",
961  sh->slice_qp,
962  -s->ps.sps->qp_bd_offset);
963  return AVERROR_INVALIDDATA;
964  }
965 
966  s->sh.slice_ctb_addr_rs = s->sh.slice_segment_addr;
967 
968  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
969  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
970  return AVERROR_INVALIDDATA;
971  }
972 
973  if (get_bits_left(gb) < 0) {
974  av_log(s->avctx, AV_LOG_ERROR,
975  "Overread slice header by %d bits\n", -get_bits_left(gb));
976  return AVERROR_INVALIDDATA;
977  }
978 
979  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
980 
981  if (!s->ps.pps->cu_qp_delta_enabled_flag)
982  s->HEVClc->qp_y = s->sh.slice_qp;
983 
984  s->slice_initialized = 1;
985  s->HEVClc->tu.cu_qp_offset_cb = 0;
986  s->HEVClc->tu.cu_qp_offset_cr = 0;
987 
988  return 0;
989 }
990 
991 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
992 
993 #define SET_SAO(elem, value) \
994 do { \
995  if (!sao_merge_up_flag && !sao_merge_left_flag) \
996  sao->elem = value; \
997  else if (sao_merge_left_flag) \
998  sao->elem = CTB(s->sao, rx-1, ry).elem; \
999  else if (sao_merge_up_flag) \
1000  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1001  else \
1002  sao->elem = 0; \
1003 } while (0)
1004 
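/* Decode the SAO parameters of one CTB. SET_SAO implements the merge
 * semantics above: with sao_merge_left_flag or sao_merge_up_flag set, the
 * value is copied from the left or above CTB instead of being parsed. */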
1005 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1006 {
1007  const HEVCContext *const s = lc->parent;
1008  int sao_merge_left_flag = 0;
1009  int sao_merge_up_flag = 0;
1010  SAOParams *sao = &CTB(s->sao, rx, ry);
1011  int c_idx, i;
1012 
1013  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1014  s->sh.slice_sample_adaptive_offset_flag[1]) {
1015  if (rx > 0) {
1016  if (lc->ctb_left_flag)
1017  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1018  }
1019  if (ry > 0 && !sao_merge_left_flag) {
1020  if (lc->ctb_up_flag)
1021  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1022  }
1023  }
1024 
1025  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1026  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1027  s->ps.pps->log2_sao_offset_scale_chroma;
1028 
1029  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1030  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1031  continue;
1032  }
1033 
1034  if (c_idx == 2) {
1035  sao->type_idx[2] = sao->type_idx[1];
1036  sao->eo_class[2] = sao->eo_class[1];
1037  } else {
1038  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1039  }
1040 
1041  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1042  continue;
1043 
1044  for (i = 0; i < 4; i++)
1045  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1046 
1047  if (sao->type_idx[c_idx] == SAO_BAND) {
1048  for (i = 0; i < 4; i++) {
1049  if (sao->offset_abs[c_idx][i]) {
1050  SET_SAO(offset_sign[c_idx][i],
1051  ff_hevc_sao_offset_sign_decode(lc));
1052  } else {
1053  sao->offset_sign[c_idx][i] = 0;
1054  }
1055  }
1056  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1057  } else if (c_idx != 2) {
1058  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1059  }
1060 
1061  // Inferred parameters
1062  sao->offset_val[c_idx][0] = 0;
1063  for (i = 0; i < 4; i++) {
1064  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1065  if (sao->type_idx[c_idx] == SAO_EDGE) {
1066  if (i > 1)
1067  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1068  } else if (sao->offset_sign[c_idx][i]) {
1069  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1070  }
1071  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1072  }
1073  }
1074 }
1075 
1076 #undef SET_SAO
1077 #undef CTB
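/* Cross-component prediction (range extensions): decode the residual scaling
 * factor res_scale_val; the chroma residual is later predicted from the luma
 * residual as (res_scale_val * luma_residual) >> 3 in the cross_pf paths of
 * hls_transform_unit(). */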
1078 
1079 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1080 {
1081  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1082 
1083  if (log2_res_scale_abs_plus1 != 0) {
1084  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1085  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1086  (1 - 2 * res_scale_sign_flag);
1087  } else {
1088  lc->tu.res_scale_val = 0;
1089  }
1090 
1091 
1092  return 0;
1093 }
1094 
1095 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1096  int xBase, int yBase, int cb_xBase, int cb_yBase,
1097  int log2_cb_size, int log2_trafo_size,
1098  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1099 {
1100  const HEVCContext *const s = lc->parent;
1101  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1102  int i;
1103 
1104  if (lc->cu.pred_mode == MODE_INTRA) {
1105  int trafo_size = 1 << log2_trafo_size;
1106  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1107 
1108  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1109  }
1110 
1111  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1112  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1113  int scan_idx = SCAN_DIAG;
1114  int scan_idx_c = SCAN_DIAG;
1115  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1116  (s->ps.sps->chroma_format_idc == 2 &&
1117  (cbf_cb[1] || cbf_cr[1]));
1118 
1119  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1120  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1121  if (lc->tu.cu_qp_delta != 0)
1122  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1123  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1124  lc->tu.is_cu_qp_delta_coded = 1;
1125 
1126  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1127  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1128  av_log(s->avctx, AV_LOG_ERROR,
1129  "The cu_qp_delta %d is outside the valid range "
1130  "[%d, %d].\n",
1131  lc->tu.cu_qp_delta,
1132  -(26 + s->ps.sps->qp_bd_offset / 2),
1133  (25 + s->ps.sps->qp_bd_offset / 2));
1134  return AVERROR_INVALIDDATA;
1135  }
1136 
1137  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1138  }
1139 
1140  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1141  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1142  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1143  if (cu_chroma_qp_offset_flag) {
1144  int cu_chroma_qp_offset_idx = 0;
1145  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1146  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1147  av_log(s->avctx, AV_LOG_ERROR,
1148  "cu_chroma_qp_offset_idx not yet tested.\n");
1149  }
1150  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1151  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1152  } else {
1153  lc->tu.cu_qp_offset_cb = 0;
1154  lc->tu.cu_qp_offset_cr = 0;
1155  }
1156  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1157  }
1158 
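 /* For small intra TUs the coefficient scan follows the prediction
  * direction: near-horizontal modes (6..14) use a vertical scan and
  * near-vertical modes (22..30) use a horizontal scan; everything else
  * keeps the diagonal scan. */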
1159  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1160  if (lc->tu.intra_pred_mode >= 6 &&
1161  lc->tu.intra_pred_mode <= 14) {
1162  scan_idx = SCAN_VERT;
1163  } else if (lc->tu.intra_pred_mode >= 22 &&
1164  lc->tu.intra_pred_mode <= 30) {
1165  scan_idx = SCAN_HORIZ;
1166  }
1167 
1168  if (lc->tu.intra_pred_mode_c >= 6 &&
1169  lc->tu.intra_pred_mode_c <= 14) {
1170  scan_idx_c = SCAN_VERT;
1171  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1172  lc->tu.intra_pred_mode_c <= 30) {
1173  scan_idx_c = SCAN_HORIZ;
1174  }
1175  }
1176 
1177  lc->tu.cross_pf = 0;
1178 
1179  if (cbf_luma)
1180  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1181  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1182  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1183  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1184  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1185  (lc->cu.pred_mode == MODE_INTER ||
1186  (lc->tu.chroma_mode_c == 4)));
1187 
1188  if (lc->tu.cross_pf) {
1189  hls_cross_component_pred(lc, 0);
1190  }
1191  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1192  if (lc->cu.pred_mode == MODE_INTRA) {
1193  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1194  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1195  }
1196  if (cbf_cb[i])
1197  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1198  log2_trafo_size_c, scan_idx_c, 1);
1199  else
1200  if (lc->tu.cross_pf) {
1201  ptrdiff_t stride = s->frame->linesize[1];
1202  int hshift = s->ps.sps->hshift[1];
1203  int vshift = s->ps.sps->vshift[1];
1204  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1205  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1206  int size = 1 << log2_trafo_size_c;
1207 
1208  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1209  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1210  for (i = 0; i < (size * size); i++) {
1211  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1212  }
1213  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1214  }
1215  }
1216 
1217  if (lc->tu.cross_pf) {
1218  hls_cross_component_pred(lc, 1);
1219  }
1220  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1221  if (lc->cu.pred_mode == MODE_INTRA) {
1222  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1223  trafo_size_h, trafo_size_v);
1224  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1225  }
1226  if (cbf_cr[i])
1227  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1228  log2_trafo_size_c, scan_idx_c, 2);
1229  else
1230  if (lc->tu.cross_pf) {
1231  ptrdiff_t stride = s->frame->linesize[2];
1232  int hshift = s->ps.sps->hshift[2];
1233  int vshift = s->ps.sps->vshift[2];
1234  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1235  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1236  int size = 1 << log2_trafo_size_c;
1237 
1238  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1239  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1240  for (i = 0; i < (size * size); i++) {
1241  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1242  }
1243  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1244  }
1245  }
1246  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1247  int trafo_size_h = 1 << (log2_trafo_size + 1);
1248  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1249  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1250  if (lc->cu.pred_mode == MODE_INTRA) {
1251  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1252  trafo_size_h, trafo_size_v);
1253  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1254  }
1255  if (cbf_cb[i])
1256  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1257  log2_trafo_size, scan_idx_c, 1);
1258  }
1259  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1260  if (lc->cu.pred_mode == MODE_INTRA) {
1261  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1262  trafo_size_h, trafo_size_v);
1263  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1264  }
1265  if (cbf_cr[i])
1266  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1267  log2_trafo_size, scan_idx_c, 2);
1268  }
1269  }
1270  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1271  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1272  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1273  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1274  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1275  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1276  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1277  if (s->ps.sps->chroma_format_idc == 2) {
1278  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1279  trafo_size_h, trafo_size_v);
1280  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1281  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1282  }
1283  } else if (blk_idx == 3) {
1284  int trafo_size_h = 1 << (log2_trafo_size + 1);
1285  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1286  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1287  trafo_size_h, trafo_size_v);
1288  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1289  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1290  if (s->ps.sps->chroma_format_idc == 2) {
1291  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1292  trafo_size_h, trafo_size_v);
1293  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1294  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1295  }
1296  }
1297  }
1298 
1299  return 0;
1300 }
1301 
1302 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1303 {
1304  int cb_size = 1 << log2_cb_size;
1305  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1306 
1307  int min_pu_width = s->ps.sps->min_pu_width;
1308  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1309  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1310  int i, j;
1311 
1312  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1313  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1314  s->is_pcm[i + j * min_pu_width] = 2;
1315 }
1316 
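/* Recursively parse the residual quadtree: each node either splits into four
 * half-size transform blocks or terminates in a transform unit, with the
 * chroma cbf flags inherited from the parent depth. */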
1317 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1318  int xBase, int yBase, int cb_xBase, int cb_yBase,
1319  int log2_cb_size, int log2_trafo_size,
1320  int trafo_depth, int blk_idx,
1321  const int *base_cbf_cb, const int *base_cbf_cr)
1322 {
1323  const HEVCContext *const s = lc->parent;
1324  uint8_t split_transform_flag;
1325  int cbf_cb[2];
1326  int cbf_cr[2];
1327  int ret;
1328 
1329  cbf_cb[0] = base_cbf_cb[0];
1330  cbf_cb[1] = base_cbf_cb[1];
1331  cbf_cr[0] = base_cbf_cr[0];
1332  cbf_cr[1] = base_cbf_cr[1];
1333 
1334  if (lc->cu.intra_split_flag) {
1335  if (trafo_depth == 1) {
1336  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1337  if (s->ps.sps->chroma_format_idc == 3) {
1338  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1339  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1340  } else {
1341  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1342  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1343  }
1344  }
1345  } else {
1346  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1347  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1348  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1349  }
1350 
1351  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1352  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1353  trafo_depth < lc->cu.max_trafo_depth &&
1354  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1355  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1356  } else {
1357  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1358  lc->cu.pred_mode == MODE_INTER &&
1359  lc->cu.part_mode != PART_2Nx2N &&
1360  trafo_depth == 0;
1361 
1362  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1363  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1364  inter_split;
1365  }
1366 
1367  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1368  if (trafo_depth == 0 || cbf_cb[0]) {
1369  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1370  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1371  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1372  }
1373  }
1374 
1375  if (trafo_depth == 0 || cbf_cr[0]) {
1376  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1377  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1378  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1379  }
1380  }
1381  }
1382 
1383  if (split_transform_flag) {
1384  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1385  const int x1 = x0 + trafo_size_split;
1386  const int y1 = y0 + trafo_size_split;
1387 
1388 #define SUBDIVIDE(x, y, idx) \
1389 do { \
1390  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1391  log2_trafo_size - 1, trafo_depth + 1, idx, \
1392  cbf_cb, cbf_cr); \
1393  if (ret < 0) \
1394  return ret; \
1395 } while (0)
1396 
1397  SUBDIVIDE(x0, y0, 0);
1398  SUBDIVIDE(x1, y0, 1);
1399  SUBDIVIDE(x0, y1, 2);
1400  SUBDIVIDE(x1, y1, 3);
1401 
1402 #undef SUBDIVIDE
1403  } else {
1404  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1405  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1406  int min_tu_width = s->ps.sps->min_tb_width;
1407  int cbf_luma = 1;
1408 
1409  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1410  cbf_cb[0] || cbf_cr[0] ||
1411  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1412  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1413  }
1414 
1415  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1416  log2_cb_size, log2_trafo_size,
1417  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1418  if (ret < 0)
1419  return ret;
1420  // TODO: store cbf_luma somewhere else
1421  if (cbf_luma) {
1422  int i, j;
1423  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1424  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1425  int x_tu = (x0 + j) >> log2_min_tu_size;
1426  int y_tu = (y0 + i) >> log2_min_tu_size;
1427  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1428  }
1429  }
1430  if (!s->sh.disable_deblocking_filter_flag) {
1431  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1432  if (s->ps.pps->transquant_bypass_enable_flag &&
1433  lc->cu.cu_transquant_bypass_flag)
1434  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1435  }
1436  }
1437  return 0;
1438 }
1439 
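/* PCM coding unit: the luma and chroma samples are stored uncompressed in the
 * bitstream and copied straight into the frame via the put_pcm() DSP hook. */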
1440 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1441 {
1442  const HEVCContext *const s = lc->parent;
1443  GetBitContext gb;
1444  int cb_size = 1 << log2_cb_size;
1445  ptrdiff_t stride0 = s->frame->linesize[0];
1446  ptrdiff_t stride1 = s->frame->linesize[1];
1447  ptrdiff_t stride2 = s->frame->linesize[2];
1448  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1449  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1450  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1451 
1452  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1453  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1454  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1455  s->ps.sps->pcm.bit_depth_chroma;
1456  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1457  int ret;
1458 
1459  if (!s->sh.disable_deblocking_filter_flag)
1460  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1461 
1462  ret = init_get_bits(&gb, pcm, length);
1463  if (ret < 0)
1464  return ret;
1465 
1466  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1467  if (s->ps.sps->chroma_format_idc) {
1468  s->hevcdsp.put_pcm(dst1, stride1,
1469  cb_size >> s->ps.sps->hshift[1],
1470  cb_size >> s->ps.sps->vshift[1],
1471  &gb, s->ps.sps->pcm.bit_depth_chroma);
1472  s->hevcdsp.put_pcm(dst2, stride2,
1473  cb_size >> s->ps.sps->hshift[2],
1474  cb_size >> s->ps.sps->vshift[2],
1475  &gb, s->ps.sps->pcm.bit_depth_chroma);
1476  }
1477 
1478  return 0;
1479 }
1480 
1481 /**
1482  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1483  *
1484  * @param s HEVC decoding context
1485  * @param dst target buffer for block data at block position
1486  * @param dststride stride of the dst buffer
1487  * @param ref reference picture buffer at origin (0, 0)
1488  * @param mv motion vector (relative to block position) to get pixel data from
1489  * @param x_off horizontal position of block from origin (0, 0)
1490  * @param y_off vertical position of block from origin (0, 0)
1491  * @param block_w width of block
1492  * @param block_h height of block
1493  * @param luma_weight weighting factor applied to the luma prediction
1494  * @param luma_offset additive offset applied to the luma prediction value
1495  */
1496 
1497 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1498  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1499  int block_w, int block_h, int luma_weight, int luma_offset)
1500 {
1501  const HEVCContext *const s = lc->parent;
1502  const uint8_t *src = ref->data[0];
1503  ptrdiff_t srcstride = ref->linesize[0];
1504  int pic_width = s->ps.sps->width;
1505  int pic_height = s->ps.sps->height;
1506  int mx = mv->x & 3;
1507  int my = mv->y & 3;
1508  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1509  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1510  int idx = hevc_pel_weight[block_w];
1511 
1512  x_off += mv->x >> 2;
1513  y_off += mv->y >> 2;
1514  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1515 
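 /* If the block plus the interpolation filter margin reaches outside the
  * picture, replicate the border pixels into edge_emu_buffer and do the
  * interpolation from there instead. */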
1516  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1517  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1518  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1519  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1520  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1521  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1522 
1523  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1524  edge_emu_stride, srcstride,
1525  block_w + QPEL_EXTRA,
1526  block_h + QPEL_EXTRA,
1527  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1528  pic_width, pic_height);
1529  src = lc->edge_emu_buffer + buf_offset;
1530  srcstride = edge_emu_stride;
1531  }
1532 
1533  if (!weight_flag)
1534  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1535  block_h, mx, my, block_w);
1536  else
1537  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1538  block_h, s->sh.luma_log2_weight_denom,
1539  luma_weight, luma_offset, mx, my, block_w);
1540 }
1541 
1542 /**
1543  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1544  *
1545  * @param s HEVC decoding context
1546  * @param dst target buffer for block data at block position
1547  * @param dststride stride of the dst buffer
1548  * @param ref0 reference picture0 buffer at origin (0, 0)
1549  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1550  * @param x_off horizontal position of block from origin (0, 0)
1551  * @param y_off vertical position of block from origin (0, 0)
1552  * @param block_w width of block
1553  * @param block_h height of block
1554  * @param ref1 reference picture1 buffer at origin (0, 0)
1555  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1556  * @param current_mv current motion vector structure
1557  */
1558  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1559  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1560  int block_w, int block_h, const AVFrame *ref1,
1561  const Mv *mv1, struct MvField *current_mv)
1562 {
1563  const HEVCContext *const s = lc->parent;
1564  ptrdiff_t src0stride = ref0->linesize[0];
1565  ptrdiff_t src1stride = ref1->linesize[0];
1566  int pic_width = s->ps.sps->width;
1567  int pic_height = s->ps.sps->height;
1568  int mx0 = mv0->x & 3;
1569  int my0 = mv0->y & 3;
1570  int mx1 = mv1->x & 3;
1571  int my1 = mv1->y & 3;
1572  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1573  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1574  int x_off0 = x_off + (mv0->x >> 2);
1575  int y_off0 = y_off + (mv0->y >> 2);
1576  int x_off1 = x_off + (mv1->x >> 2);
1577  int y_off1 = y_off + (mv1->y >> 2);
1578  int idx = hevc_pel_weight[block_w];
1579 
1580  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1581  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1582 
1583  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1584  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1585  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1586  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1587  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1588  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1589 
1590  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1591  edge_emu_stride, src0stride,
1592  block_w + QPEL_EXTRA,
1593  block_h + QPEL_EXTRA,
1594  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1595  pic_width, pic_height);
1596  src0 = lc->edge_emu_buffer + buf_offset;
1597  src0stride = edge_emu_stride;
1598  }
1599 
1600  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1601  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1602  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1603  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1604  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1605  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1606 
1607  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1608  edge_emu_stride, src1stride,
1609  block_w + QPEL_EXTRA,
1610  block_h + QPEL_EXTRA,
1611  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1612  pic_width, pic_height);
1613  src1 = lc->edge_emu_buffer2 + buf_offset;
1614  src1stride = edge_emu_stride;
1615  }
1616 
1617  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1618  block_h, mx0, my0, block_w);
1619  if (!weight_flag)
1620  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1621  block_h, mx1, my1, block_w);
1622  else
1623  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1624  block_h, s->sh.luma_log2_weight_denom,
1625  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1626  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1627  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1628  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1629  mx1, my1, block_w);
1630 
1631 }
1632 
1633 /**
1634  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1635  *
1636  * @param lc HEVC local (per-thread) decoder context
1637  * @param dst0 target buffer for block data at block position (one chroma plane)
1638  * @param dststride stride of the dst0 buffer
1639  * @param src0 source chroma plane of the reference picture at origin (0, 0)
1640  * @param srcstride stride of the src0 buffer
1641  * @param reflist reference list index selecting the motion vector from current_mv
1642  * @param x_off horizontal position of block from origin (0, 0)
1643  * @param y_off vertical position of block from origin (0, 0)
1644  * @param block_w width of block
1645  * @param block_h height of block
1646  * @param chroma_weight weighting factor applied to the chroma prediction
1647  * @param chroma_offset additive offset applied to the chroma prediction value
1648  */
1649 
1650 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1651  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1652  int x_off, int y_off, int block_w, int block_h,
1653  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1654 {
1655  const HEVCContext *const s = lc->parent;
1656  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1657  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1658  const Mv *mv = &current_mv->mv[reflist];
1659  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1660  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1661  int idx = hevc_pel_weight[block_w];
1662  int hshift = s->ps.sps->hshift[1];
1663  int vshift = s->ps.sps->vshift[1];
1664  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1665  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1666  intptr_t _mx = mx << (1 - hshift);
1667  intptr_t _my = my << (1 - vshift);
1668 
1669  x_off += mv->x >> (2 + hshift);
1670  y_off += mv->y >> (2 + vshift);
1671  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1672 
1673  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1674  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1675  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1676  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1677  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1678  int buf_offset0 = EPEL_EXTRA_BEFORE *
1679  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1680  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1681  edge_emu_stride, srcstride,
1682  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1683  x_off - EPEL_EXTRA_BEFORE,
1684  y_off - EPEL_EXTRA_BEFORE,
1685  pic_width, pic_height);
1686 
1687  src0 = lc->edge_emu_buffer + buf_offset0;
1688  srcstride = edge_emu_stride;
1689  }
1690  if (!weight_flag)
1691  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1692  block_h, _mx, _my, block_w);
1693  else
1694  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1695  block_h, s->sh.chroma_log2_weight_denom,
1696  chroma_weight, chroma_offset, _mx, _my, block_w);
1697 }
1698 
1699 /**
1700  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1701  *
1702  * @param lc HEVC local (per-thread) decoder context
1703  * @param dst0 target buffer for block data at block position
1704  * @param dststride stride of the dst0 buffer
1705  * @param ref0 reference picture0 buffer at origin (0, 0)
1706  * @param ref1 reference picture1 buffer at origin (0, 0)
1707  * @param x_off horizontal position of block from origin (0, 0)
1708  * @param y_off vertical position of block from origin (0, 0)
1709  * @param block_w width of block
1710  * @param block_h height of block
1711  * @param current_mv current motion vector structure holding the list 0 and
1712  * list 1 motion vectors (relative to the block position) and the
1713  * reference indices used for weighted prediction
1714  * @param cidx chroma component (0 for Cb, 1 for Cr)
1715  */
1716 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1717  const AVFrame *ref0, const AVFrame *ref1,
1718  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1719 {
1720  const HEVCContext *const s = lc->parent;
1721  const uint8_t *src1 = ref0->data[cidx+1];
1722  const uint8_t *src2 = ref1->data[cidx+1];
1723  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1724  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1725  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1726  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1727  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1728  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1729  const Mv *const mv0 = &current_mv->mv[0];
1730  const Mv *const mv1 = &current_mv->mv[1];
1731  int hshift = s->ps.sps->hshift[1];
1732  int vshift = s->ps.sps->vshift[1];
1733 
1734  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1735  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1736  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1737  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1738  intptr_t _mx0 = mx0 << (1 - hshift);
1739  intptr_t _my0 = my0 << (1 - vshift);
1740  intptr_t _mx1 = mx1 << (1 - hshift);
1741  intptr_t _my1 = my1 << (1 - vshift);
1742 
1743  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1744  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1745  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1746  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1747  int idx = hevc_pel_weight[block_w];
1748  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1749  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1750 
1751  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1752  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1753  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1754  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1755  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1756  int buf_offset1 = EPEL_EXTRA_BEFORE *
1757  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1758 
1759  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1760  edge_emu_stride, src1stride,
1761  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1762  x_off0 - EPEL_EXTRA_BEFORE,
1763  y_off0 - EPEL_EXTRA_BEFORE,
1764  pic_width, pic_height);
1765 
1766  src1 = lc->edge_emu_buffer + buf_offset1;
1767  src1stride = edge_emu_stride;
1768  }
1769 
1770  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1771  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1772  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1773  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1774  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1775  int buf_offset1 = EPEL_EXTRA_BEFORE *
1776  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1777 
1778  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1779  edge_emu_stride, src2stride,
1780  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1781  x_off1 - EPEL_EXTRA_BEFORE,
1782  y_off1 - EPEL_EXTRA_BEFORE,
1783  pic_width, pic_height);
1784 
1785  src2 = lc->edge_emu_buffer2 + buf_offset1;
1786  src2stride = edge_emu_stride;
1787  }
1788 
1789  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1790  block_h, _mx0, _my0, block_w);
1791  if (!weight_flag)
1792  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1793  src2, src2stride, lc->tmp,
1794  block_h, _mx1, _my1, block_w);
1795  else
1796  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1797  src2, src2stride, lc->tmp,
1798  block_h,
1799  s->sh.chroma_log2_weight_denom,
1800  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1801  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1802  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1803  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1804  _mx1, _my1, block_w);
1805 }
1806 
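/* With frame threading, block until the reference picture has reconstructed
 * the rows this motion vector will read from (target rows plus the
 * interpolation margin). */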
1807 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1808  const Mv *mv, int y0, int height)
1809 {
1810  if (s->threads_type == FF_THREAD_FRAME ) {
1811  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1812 
1813  ff_thread_await_progress(&ref->tf, y, 0);
1814  }
1815 }
1816 
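/* Parse the inter prediction direction, reference indices, MVP flags and
 * MVDs for a non-merge PU, then derive the final L0/L1 motion vectors by
 * adding the decoded MVDs to the AMVP predictors. */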
1817 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1818  int nPbH, int log2_cb_size, int part_idx,
1819  int merge_idx, MvField *mv)
1820 {
1821  const HEVCContext *const s = lc->parent;
1822  enum InterPredIdc inter_pred_idc = PRED_L0;
1823  int mvp_flag;
1824 
1825  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1826  mv->pred_flag = 0;
1827  if (s->sh.slice_type == HEVC_SLICE_B)
1828  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1829 
1830  if (inter_pred_idc != PRED_L1) {
1831  if (s->sh.nb_refs[L0])
1832  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1833 
1834  mv->pred_flag = PF_L0;
1835  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1836  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1837  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1838  part_idx, merge_idx, mv, mvp_flag, 0);
1839  mv->mv[0].x += lc->pu.mvd.x;
1840  mv->mv[0].y += lc->pu.mvd.y;
1841  }
1842 
1843  if (inter_pred_idc != PRED_L0) {
1844  if (s->sh.nb_refs[L1])
1845  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1846 
1847  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1848  AV_ZERO32(&lc->pu.mvd);
1849  } else {
1850  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1851  }
1852 
1853  mv->pred_flag += PF_L1;
1854  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1855  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1856  part_idx, merge_idx, mv, mvp_flag, 1);
1857  mv->mv[1].x += lc->pu.mvd.x;
1858  mv->mv[1].y += lc->pu.mvd.y;
1859  }
1860 }
1861 
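/* Decode one prediction unit: recover its motion data (merge or AMVP),
 * store it into the motion vector field, wait for the referenced rows in
 * frame-threaded mode, and run luma/chroma motion compensation for the
 * uni- or bi-predicted cases. */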
1862 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1863  int nPbW, int nPbH,
1864  int log2_cb_size, int partIdx, int idx)
1865 {
1866 #define POS(c_idx, x, y) \
1867  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1868  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1869  const HEVCContext *const s = lc->parent;
1870  int merge_idx = 0;
1871  struct MvField current_mv = {{{ 0 }}};
1872 
1873  int min_pu_width = s->ps.sps->min_pu_width;
1874 
1875  MvField *tab_mvf = s->ref->tab_mvf;
1876  const RefPicList *refPicList = s->ref->refPicList;
1877  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1878  uint8_t *dst0 = POS(0, x0, y0);
1879  uint8_t *dst1 = POS(1, x0, y0);
1880  uint8_t *dst2 = POS(2, x0, y0);
1881  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1882  int min_cb_width = s->ps.sps->min_cb_width;
1883  int x_cb = x0 >> log2_min_cb_size;
1884  int y_cb = y0 >> log2_min_cb_size;
1885  int x_pu, y_pu;
1886  int i, j;
1887 
1888  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1889 
1890  if (!skip_flag)
1891  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1892 
1893  if (skip_flag || lc->pu.merge_flag) {
1894  if (s->sh.max_num_merge_cand > 1)
1895  merge_idx = ff_hevc_merge_idx_decode(lc);
1896  else
1897  merge_idx = 0;
1898 
1899  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1900  partIdx, merge_idx, &current_mv);
1901  } else {
1902  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1903  partIdx, merge_idx, &current_mv);
1904  }
1905 
1906  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1907  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1908 
1909  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1910  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1911  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1912 
1913  if (current_mv.pred_flag & PF_L0) {
1914  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1915  if (!ref0)
1916  return;
1917  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1918  }
1919  if (current_mv.pred_flag & PF_L1) {
1920  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1921  if (!ref1)
1922  return;
1923  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1924  }
1925 
1926  if (current_mv.pred_flag == PF_L0) {
1927  int x0_c = x0 >> s->ps.sps->hshift[1];
1928  int y0_c = y0 >> s->ps.sps->vshift[1];
1929  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1930  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1931 
1932  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1933  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1934  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1935  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1936 
1937  if (s->ps.sps->chroma_format_idc) {
1938  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1939  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1940  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1941  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1942  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1943  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1944  }
1945  } else if (current_mv.pred_flag == PF_L1) {
1946  int x0_c = x0 >> s->ps.sps->hshift[1];
1947  int y0_c = y0 >> s->ps.sps->vshift[1];
1948  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1949  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1950 
1951  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
1952  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1953  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1954  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1955 
1956  if (s->ps.sps->chroma_format_idc) {
1957  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1958  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1959  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1960 
1961  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1962  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1963  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1964  }
1965  } else if (current_mv.pred_flag == PF_BI) {
1966  int x0_c = x0 >> s->ps.sps->hshift[1];
1967  int y0_c = y0 >> s->ps.sps->vshift[1];
1968  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1969  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1970 
1971  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
1972  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1973  ref1->frame, &current_mv.mv[1], &current_mv);
1974 
1975  if (s->ps.sps->chroma_format_idc) {
1976  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1977  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1978 
1979  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1980  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1981  }
1982  }
1983 }
1984 
1985 /**
1986  * 8.4.1
1987  */
1988 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
1989  int prev_intra_luma_pred_flag)
1990 {
1991  const HEVCContext *const s = lc->parent;
1992  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1993  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1994  int min_pu_width = s->ps.sps->min_pu_width;
1995  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1996  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1997  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1998 
1999  int cand_up = (lc->ctb_up_flag || y0b) ?
2000  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2001  int cand_left = (lc->ctb_left_flag || x0b) ?
2002  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2003 
2004  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2005 
2006  MvField *tab_mvf = s->ref->tab_mvf;
2007  int intra_pred_mode;
2008  int candidate[3];
2009  int i, j;
2010 
2011  // intra_pred_mode prediction does not cross vertical CTB boundaries
2012  if ((y0 - 1) < y_ctb)
2013  cand_up = INTRA_DC;
2014 
2015  if (cand_left == cand_up) {
2016  if (cand_left < 2) {
2017  candidate[0] = INTRA_PLANAR;
2018  candidate[1] = INTRA_DC;
2019  candidate[2] = INTRA_ANGULAR_26;
2020  } else {
2021  candidate[0] = cand_left;
2022  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2023  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2024  }
2025  } else {
2026  candidate[0] = cand_left;
2027  candidate[1] = cand_up;
2028  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2029  candidate[2] = INTRA_PLANAR;
2030  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2031  candidate[2] = INTRA_DC;
2032  } else {
2033  candidate[2] = INTRA_ANGULAR_26;
2034  }
2035  }
2036 
2037  if (prev_intra_luma_pred_flag) {
2038  intra_pred_mode = candidate[lc->pu.mpm_idx];
2039  } else {
2040  if (candidate[0] > candidate[1])
2041  FFSWAP(uint8_t, candidate[0], candidate[1]);
2042  if (candidate[0] > candidate[2])
2043  FFSWAP(uint8_t, candidate[0], candidate[2]);
2044  if (candidate[1] > candidate[2])
2045  FFSWAP(uint8_t, candidate[1], candidate[2]);
2046 
2047  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2048  for (i = 0; i < 3; i++)
2049  if (intra_pred_mode >= candidate[i])
2050  intra_pred_mode++;
2051  }
2052 
2053  /* write the intra prediction units into the mv array */
2054  if (!size_in_pus)
2055  size_in_pus = 1;
2056  for (i = 0; i < size_in_pus; i++) {
2057  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2058  intra_pred_mode, size_in_pus);
2059 
2060  for (j = 0; j < size_in_pus; j++) {
2061  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2062  }
2063  }
2064 
2065  return intra_pred_mode;
2066 }
2067 
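/* Record the coding-tree depth of this CU for every min-CB position it
 * covers; neighbouring depths are read back when coding split flags. */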
2068 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2069  int log2_cb_size, int ct_depth)
2070 {
2071  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2072  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2073  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2074  int y;
2075 
2076  for (y = 0; y < length; y++)
2077  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2078  ct_depth, length);
2079 }
2080 
2081 static const uint8_t tab_mode_idx[] = {
2082  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2083  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2084 
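/* Decode the luma intra prediction modes (one PU, or four for PART_NxN)
 * and derive the chroma modes according to chroma_format_idc. */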
2085 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2086  int log2_cb_size)
2087 {
2088  const HEVCContext *const s = lc->parent;
2089  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2090  uint8_t prev_intra_luma_pred_flag[4];
2091  int split = lc->cu.part_mode == PART_NxN;
2092  int pb_size = (1 << log2_cb_size) >> split;
2093  int side = split + 1;
2094  int chroma_mode;
2095  int i, j;
2096 
2097  for (i = 0; i < side; i++)
2098  for (j = 0; j < side; j++)
2099  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2100 
2101  for (i = 0; i < side; i++) {
2102  for (j = 0; j < side; j++) {
2103  if (prev_intra_luma_pred_flag[2 * i + j])
2104  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
2105  else
2106  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2107 
2108  lc->pu.intra_pred_mode[2 * i + j] =
2109  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2110  prev_intra_luma_pred_flag[2 * i + j]);
2111  }
2112  }
2113 
2114  if (s->ps.sps->chroma_format_idc == 3) {
2115  for (i = 0; i < side; i++) {
2116  for (j = 0; j < side; j++) {
2117  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2118  if (chroma_mode != 4) {
2119  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2120  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2121  else
2122  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2123  } else {
2124  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2125  }
2126  }
2127  }
2128  } else if (s->ps.sps->chroma_format_idc == 2) {
2129  int mode_idx;
2130  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2131  if (chroma_mode != 4) {
2132  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2133  mode_idx = 34;
2134  else
2135  mode_idx = intra_chroma_table[chroma_mode];
2136  } else {
2137  mode_idx = lc->pu.intra_pred_mode[0];
2138  }
2139  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2140  } else if (s->ps.sps->chroma_format_idc != 0) {
2141  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2142  if (chroma_mode != 4) {
2143  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2144  lc->pu.intra_pred_mode_c[0] = 34;
2145  else
2146  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2147  } else {
2148  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2149  }
2150  }
2151 }
2152 
2153 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
2154  int x0, int y0,
2155  int log2_cb_size)
2156 {
2157  const HEVCContext *const s = lc->parent;
2158  int pb_size = 1 << log2_cb_size;
2159  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2160  int min_pu_width = s->ps.sps->min_pu_width;
2161  MvField *tab_mvf = s->ref->tab_mvf;
2162  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2163  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2164  int j, k;
2165 
2166  if (size_in_pus == 0)
2167  size_in_pus = 1;
2168  for (j = 0; j < size_in_pus; j++)
2169  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2170  if (lc->cu.pred_mode == MODE_INTRA)
2171  for (j = 0; j < size_in_pus; j++)
2172  for (k = 0; k < size_in_pus; k++)
2173  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2174 }
2175 
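/* Decode a single coding unit: skip flag, prediction and partition modes,
 * the prediction units or PCM samples, the residual transform tree, and
 * the per-CU QP bookkeeping. */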
2176 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2177 {
2178  int cb_size = 1 << log2_cb_size;
2179  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2180  int length = cb_size >> log2_min_cb_size;
2181  int min_cb_width = s->ps.sps->min_cb_width;
2182  int x_cb = x0 >> log2_min_cb_size;
2183  int y_cb = y0 >> log2_min_cb_size;
2184  int idx = log2_cb_size - 2;
2185  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2186  int x, y, ret;
2187 
2188  lc->cu.x = x0;
2189  lc->cu.y = y0;
2190  lc->cu.pred_mode = MODE_INTRA;
2191  lc->cu.part_mode = PART_2Nx2N;
2192  lc->cu.intra_split_flag = 0;
2193 
2194  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2195  for (x = 0; x < 4; x++)
2196  lc->pu.intra_pred_mode[x] = 1;
2197  if (s->ps.pps->transquant_bypass_enable_flag) {
2198  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2199  if (lc->cu.cu_transquant_bypass_flag)
2200  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2201  } else
2202  lc->cu.cu_transquant_bypass_flag = 0;
2203 
2204  if (s->sh.slice_type != HEVC_SLICE_I) {
2205  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2206 
2207  x = y_cb * min_cb_width + x_cb;
2208  for (y = 0; y < length; y++) {
2209  memset(&s->skip_flag[x], skip_flag, length);
2210  x += min_cb_width;
2211  }
2212  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2213  } else {
2214  x = y_cb * min_cb_width + x_cb;
2215  for (y = 0; y < length; y++) {
2216  memset(&s->skip_flag[x], 0, length);
2217  x += min_cb_width;
2218  }
2219  }
2220 
2221  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2222  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2223  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2224 
2225  if (!s->sh.disable_deblocking_filter_flag)
2226  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2227  } else {
2228  int pcm_flag = 0;
2229 
2230  if (s->sh.slice_type != HEVC_SLICE_I)
2231  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2232  if (lc->cu.pred_mode != MODE_INTRA ||
2233  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2234  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2235  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2236  lc->cu.pred_mode == MODE_INTRA;
2237  }
2238 
2239  if (lc->cu.pred_mode == MODE_INTRA) {
2240  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2241  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2242  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2243  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2244  }
2245  if (pcm_flag) {
2246  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2247  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2248  if (s->ps.sps->pcm.loop_filter_disable_flag)
2249  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2250 
2251  if (ret < 0)
2252  return ret;
2253  } else {
2254  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2255  }
2256  } else {
2257  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2258  switch (lc->cu.part_mode) {
2259  case PART_2Nx2N:
2260  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2261  break;
2262  case PART_2NxN:
2263  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2264  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2265  break;
2266  case PART_Nx2N:
2267  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2268  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2269  break;
2270  case PART_2NxnU:
2271  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2272  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2273  break;
2274  case PART_2NxnD:
2275  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2276  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2277  break;
2278  case PART_nLx2N:
2279  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2280  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2281  break;
2282  case PART_nRx2N:
2283  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2284  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2285  break;
2286  case PART_NxN:
2287  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2288  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2289  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2290  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2291  break;
2292  }
2293  }
2294 
2295  if (!pcm_flag) {
2296  int rqt_root_cbf = 1;
2297 
2298  if (lc->cu.pred_mode != MODE_INTRA &&
2299  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2300  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2301  }
2302  if (rqt_root_cbf) {
2303  const static int cbf[2] = { 0 };
2304  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2305  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2306  s->ps.sps->max_transform_hierarchy_depth_inter;
2307  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2308  log2_cb_size,
2309  log2_cb_size, 0, 0, cbf, cbf);
2310  if (ret < 0)
2311  return ret;
2312  } else {
2313  if (!s->sh.disable_deblocking_filter_flag)
2314  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2315  }
2316  }
2317  }
2318 
2319  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2320  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2321 
2322  x = y_cb * min_cb_width + x_cb;
2323  for (y = 0; y < length; y++) {
2324  memset(&s->qp_y_tab[x], lc->qp_y, length);
2325  x += min_cb_width;
2326  }
2327 
2328  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2329  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2330  lc->qPy_pred = lc->qp_y;
2331  }
2332 
2333  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2334 
2335  return 0;
2336 }
2337 
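/* Recursively parse the coding quadtree. Returns a positive value while
 * more CTU data follows, 0 at the end of the slice segment, or a negative
 * error code. */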
2338 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2339  int log2_cb_size, int cb_depth)
2340 {
2341  const HEVCContext *const s = lc->parent;
2342  const int cb_size = 1 << log2_cb_size;
2343  int ret;
2344  int split_cu;
2345 
2346  lc->ct_depth = cb_depth;
2347  if (x0 + cb_size <= s->ps.sps->width &&
2348  y0 + cb_size <= s->ps.sps->height &&
2349  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2350  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2351  } else {
2352  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2353  }
2354  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2355  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2356  lc->tu.is_cu_qp_delta_coded = 0;
2357  lc->tu.cu_qp_delta = 0;
2358  }
2359 
2360  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2361  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2362  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2363  }
2364 
2365  if (split_cu) {
2366  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2367  const int cb_size_split = cb_size >> 1;
2368  const int x1 = x0 + cb_size_split;
2369  const int y1 = y0 + cb_size_split;
2370 
2371  int more_data = 0;
2372 
2373  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2374  if (more_data < 0)
2375  return more_data;
2376 
2377  if (more_data && x1 < s->ps.sps->width) {
2378  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2379  if (more_data < 0)
2380  return more_data;
2381  }
2382  if (more_data && y1 < s->ps.sps->height) {
2383  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2384  if (more_data < 0)
2385  return more_data;
2386  }
2387  if (more_data && x1 < s->ps.sps->width &&
2388  y1 < s->ps.sps->height) {
2389  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2390  if (more_data < 0)
2391  return more_data;
2392  }
2393 
2394  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2395  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2396  lc->qPy_pred = lc->qp_y;
2397 
2398  if (more_data)
2399  return ((x1 + cb_size_split) < s->ps.sps->width ||
2400  (y1 + cb_size_split) < s->ps.sps->height);
2401  else
2402  return 0;
2403  } else {
2404  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2405  if (ret < 0)
2406  return ret;
2407  if ((!((x0 + cb_size) %
2408  (1 << (s->ps.sps->log2_ctb_size))) ||
2409  (x0 + cb_size >= s->ps.sps->width)) &&
2410  (!((y0 + cb_size) %
2411  (1 << (s->ps.sps->log2_ctb_size))) ||
2412  (y0 + cb_size >= s->ps.sps->height))) {
2413  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2414  return !end_of_slice_flag;
2415  } else {
2416  return 1;
2417  }
2418  }
2419 
2420  return 0;
2421 }
2422 
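/* Set up per-CTB neighbourhood state: tile/slice boundary flags and the
 * availability of the left, up, up-left and up-right CTBs. */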
2423 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2424  int ctb_addr_ts)
2425 {
2426  const HEVCContext *const s = lc->parent;
2427  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2428  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2429  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2430 
2431  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2432 
2433  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2434  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2435  lc->first_qp_group = 1;
2436  lc->end_of_tiles_x = s->ps.sps->width;
2437  } else if (s->ps.pps->tiles_enabled_flag) {
2438  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2439  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2440  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2441  lc->first_qp_group = 1;
2442  }
2443  } else {
2444  lc->end_of_tiles_x = s->ps.sps->width;
2445  }
2446 
2447  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2448 
2449  lc->boundary_flags = 0;
2450  if (s->ps.pps->tiles_enabled_flag) {
2451  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2452  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2453  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2454  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2455  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2456  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2457  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2458  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2459  } else {
2460  if (ctb_addr_in_slice <= 0)
2461  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2462  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2463  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2464  }
2465 
2466  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2467  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2468  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2469  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2470 }
2471 
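/* Single-threaded slice decoding: iterate over the CTBs in tile-scan order,
 * initializing CABAC, parsing SAO parameters and the coding quadtree, and
 * running the in-loop filters as CTBs complete. */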
2472 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2473 {
2474  HEVCContext *s = avctxt->priv_data;
2475  HEVCLocalContext *const lc = s->HEVClc;
2476  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2477  int more_data = 1;
2478  int x_ctb = 0;
2479  int y_ctb = 0;
2480  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2481  int ret;
2482 
2483  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2484  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2485  return AVERROR_INVALIDDATA;
2486  }
2487 
2488  if (s->sh.dependent_slice_segment_flag) {
2489  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2490  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2491  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2492  return AVERROR_INVALIDDATA;
2493  }
2494  }
2495 
2496  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2497  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2498 
2499  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2500  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2501  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2502 
2503  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2504  if (ret < 0) {
2505  s->tab_slice_address[ctb_addr_rs] = -1;
2506  return ret;
2507  }
2508 
2509  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2510 
2511  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2512  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2513  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2514 
2515  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2516  if (more_data < 0) {
2517  s->tab_slice_address[ctb_addr_rs] = -1;
2518  return more_data;
2519  }
2520 
2521 
2522  ctb_addr_ts++;
2523  ff_hevc_save_states(lc, ctb_addr_ts);
2524  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2525  }
2526 
2527  if (x_ctb + ctb_size >= s->ps.sps->width &&
2528  y_ctb + ctb_size >= s->ps.sps->height)
2529  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2530 
2531  return ctb_addr_ts;
2532 }
2533 
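/* Run hls_decode_entry() through the avctx->execute() API; returns the
 * number of decoded CTBs (in tile scan) or a negative error code. */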
2534 static int hls_slice_data(HEVCContext *s)
2535 {
2536  int arg[2];
2537  int ret[2];
2538 
2539  arg[0] = 0;
2540  arg[1] = 1;
2541 
2542  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2543  return ret[0];
2544 }
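/* Wavefront (WPP) worker: decode one CTB row, waiting on the row above for
 * CABAC state propagation and reporting progress as CTBs complete. */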
2545 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2546  int job, int self_id)
2547 {
2548  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2549  const HEVCContext *const s = lc->parent;
2550  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2551  int more_data = 1;
2552  int ctb_row = job;
2553  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2554  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2555  int thread = ctb_row % s->threads_number;
2556  int ret;
2557 
2558  if(ctb_row) {
2559  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2560  if (ret < 0)
2561  goto error;
2562  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2563  }
2564 
2565  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2566  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2567  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2568 
2569  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2570 
2571  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2572 
2573  /* atomic_load's prototype requires a pointer to non-const atomic variable
2574  * (due to implementations via mutexes, where reads involve writes).
2575  * Of course, casting const away here is nevertheless safe. */
2576  if (atomic_load((atomic_int*)&s->wpp_err)) {
2577  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2578  return 0;
2579  }
2580 
2581  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2582  if (ret < 0)
2583  goto error;
2584  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2585  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2586 
2587  if (more_data < 0) {
2588  ret = more_data;
2589  goto error;
2590  }
2591 
2592  ctb_addr_ts++;
2593 
2594  ff_hevc_save_states(lc, ctb_addr_ts);
2595  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2596  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2597 
2598  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2599  /* Casting const away here is safe, because it is an atomic operation. */
2600  atomic_store((atomic_int*)&s->wpp_err, 1);
2601  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2602  return 0;
2603  }
2604 
2605  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2606  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2607  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2608  return ctb_addr_ts;
2609  }
2610  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2611  x_ctb+=ctb_size;
2612 
2613  if(x_ctb >= s->ps.sps->width) {
2614  break;
2615  }
2616  }
2617  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2618 
2619  return 0;
2620 error:
2621  s->tab_slice_address[ctb_addr_rs] = -1;
2622  /* Casting const away here is safe, because it is an atomic operation. */
2623  atomic_store((atomic_int*)&s->wpp_err, 1);
2624  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2625  return ret;
2626 }
2627 
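/* Set up wavefront decoding: compute the per-row entry point offsets and
 * sizes, allocate one local context per thread, and run the WPP workers
 * through avctx->execute2(). */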
2628 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2629 {
2630  const uint8_t *data = nal->data;
2631  int length = nal->size;
2632  HEVCLocalContext *lc = s->HEVClc;
2633  int *ret;
2634  int64_t offset;
2635  int64_t startheader, cmpt = 0;
2636  int i, j, res = 0;
2637 
2638  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2639  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2640  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2641  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2642  );
2643  return AVERROR_INVALIDDATA;
2644  }
2645 
2646  for (i = 1; i < s->threads_number; i++) {
2647  if (s->HEVClcList[i])
2648  continue;
2649  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2650  if (!s->HEVClcList[i])
2651  return AVERROR(ENOMEM);
2652  s->HEVClcList[i]->logctx = s->avctx;
2653  s->HEVClcList[i]->parent = s;
2654  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2655  }
2656 
2657  offset = (lc->gb.index >> 3);
2658 
2659  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2660  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2661  startheader--;
2662  cmpt++;
2663  }
2664  }
2665 
2666  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2667  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2668  for (j = 0, cmpt = 0, startheader = offset
2669  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2670  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2671  startheader--;
2672  cmpt++;
2673  }
2674  }
2675  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2676  s->sh.offset[i - 1] = offset;
2677 
2678  }
2679  if (s->sh.num_entry_point_offsets != 0) {
2680  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2681  if (length < offset) {
2682  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2683  return AVERROR_INVALIDDATA;
2684  }
2685  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2686  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2687 
2688  }
2689  s->data = data;
2690 
2691  for (i = 1; i < s->threads_number; i++) {
2692  s->HEVClcList[i]->first_qp_group = 1;
2693  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2694  }
2695 
2696  atomic_store(&s->wpp_err, 0);
2697  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2698  if (res < 0)
2699  return res;
2700 
2701  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2702  if (!ret)
2703  return AVERROR(ENOMEM);
2704 
2705  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2706  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2707 
2708  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2709  res += ret[i];
2710 
2711  av_free(ret);
2712  return res;
2713 }
2714 
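/* Export SEI-derived metadata (stereo 3D, display orientation, mastering
 * display, content light level, A53 captions, timecodes, film grain,
 * dynamic HDR, Dolby Vision RPU) as side data of the output frame. */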
2715 static int set_side_data(HEVCContext *s)
2716 {
2717  AVFrame *out = s->ref->frame;
2718  int ret;
2719 
2720  if (s->sei.frame_packing.present &&
2721  s->sei.frame_packing.arrangement_type >= 3 &&
2722  s->sei.frame_packing.arrangement_type <= 5 &&
2723  s->sei.frame_packing.content_interpretation_type > 0 &&
2724  s->sei.frame_packing.content_interpretation_type < 3) {
2725  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2726  if (!stereo)
2727  return AVERROR(ENOMEM);
2728 
2729  switch (s->sei.frame_packing.arrangement_type) {
2730  case 3:
2731  if (s->sei.frame_packing.quincunx_subsampling)
2732  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2733  else
2734  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2735  break;
2736  case 4:
2737  stereo->type = AV_STEREO3D_TOPBOTTOM;
2738  break;
2739  case 5:
2740  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2741  break;
2742  }
2743 
2744  if (s->sei.frame_packing.content_interpretation_type == 2)
2745  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2746 
2747  if (s->sei.frame_packing.arrangement_type == 5) {
2748  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2749  stereo->view = AV_STEREO3D_VIEW_LEFT;
2750  else
2751  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2752  }
2753  }
2754 
2755  if (s->sei.display_orientation.present &&
2756  (s->sei.display_orientation.anticlockwise_rotation ||
2757  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2758  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2759  AVFrameSideData *rotation = av_frame_new_side_data(out,
2760  AV_FRAME_DATA_DISPLAYMATRIX,
2761  sizeof(int32_t) * 9);
2762  if (!rotation)
2763  return AVERROR(ENOMEM);
2764 
2765  /* av_display_rotation_set() expects the angle in the clockwise
2766  * direction, hence the first minus.
2767  * The below code applies the flips after the rotation, yet
2768  * the H.2645 specs require flipping to be applied first.
2769  * Because of R O(phi) = O(-phi) R (where R is flipping around
2770  * an arbitrary axis and O(phi) is the proper rotation by phi)
2771  * we can create display matrices as desired by negating
2772  * the degree once for every flip applied. */
2773  angle = -angle * (1 - 2 * !!s->sei.display_orientation.hflip)
2774  * (1 - 2 * !!s->sei.display_orientation.vflip);
2775  av_display_rotation_set((int32_t *)rotation->data, angle);
2776  av_display_matrix_flip((int32_t *)rotation->data,
2777  s->sei.display_orientation.hflip,
2778  s->sei.display_orientation.vflip);
2779  }
2780 
2781  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2782  // so the side data persists for the entire coded video sequence.
2783  if (s->sei.mastering_display.present > 0 &&
2784  IS_IRAP(s) && s->no_rasl_output_flag) {
2785  s->sei.mastering_display.present--;
2786  }
2787  if (s->sei.mastering_display.present) {
2788  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2789  const int mapping[3] = {2, 0, 1};
2790  const int chroma_den = 50000;
2791  const int luma_den = 10000;
2792  int i;
2793  AVMasteringDisplayMetadata *metadata =
2794  av_mastering_display_metadata_create_side_data(out);
2795  if (!metadata)
2796  return AVERROR(ENOMEM);
2797 
2798  for (i = 0; i < 3; i++) {
2799  const int j = mapping[i];
2800  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2801  metadata->display_primaries[i][0].den = chroma_den;
2802  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2803  metadata->display_primaries[i][1].den = chroma_den;
2804  }
2805  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2806  metadata->white_point[0].den = chroma_den;
2807  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2808  metadata->white_point[1].den = chroma_den;
2809 
2810  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2811  metadata->max_luminance.den = luma_den;
2812  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2813  metadata->min_luminance.den = luma_den;
2814  metadata->has_luminance = 1;
2815  metadata->has_primaries = 1;
2816 
2817  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2818  av_log(s->avctx, AV_LOG_DEBUG,
2819  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2820  av_q2d(metadata->display_primaries[0][0]),
2821  av_q2d(metadata->display_primaries[0][1]),
2822  av_q2d(metadata->display_primaries[1][0]),
2823  av_q2d(metadata->display_primaries[1][1]),
2824  av_q2d(metadata->display_primaries[2][0]),
2825  av_q2d(metadata->display_primaries[2][1]),
2826  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2827  av_log(s->avctx, AV_LOG_DEBUG,
2828  "min_luminance=%f, max_luminance=%f\n",
2829  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2830  }
2831  // Decrement the content light level flag when IRAP frame has no_rasl_output_flag=1
2832  // so the side data persists for the entire coded video sequence.
2833  if (s->sei.content_light.present > 0 &&
2834  IS_IRAP(s) && s->no_rasl_output_flag) {
2835  s->sei.content_light.present--;
2836  }
2837  if (s->sei.content_light.present) {
2838  AVContentLightMetadata *metadata =
2839  av_content_light_metadata_create_side_data(out);
2840  if (!metadata)
2841  return AVERROR(ENOMEM);
2842  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2843  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2844 
2845  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2846  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2847  metadata->MaxCLL, metadata->MaxFALL);
2848  }
2849 
2850  if (s->sei.a53_caption.buf_ref) {
2851  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2852 
2853  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2854  if (!sd)
2855  av_buffer_unref(&a53->buf_ref);
2856  a53->buf_ref = NULL;
2857  }
2858 
2859  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2860  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2861 
2862  if (unreg->buf_ref[i]) {
2863  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
2864  AV_FRAME_DATA_SEI_UNREGISTERED,
2865  unreg->buf_ref[i]);
2866  if (!sd)
2867  av_buffer_unref(&unreg->buf_ref[i]);
2868  unreg->buf_ref[i] = NULL;
2869  }
2870  }
2871  s->sei.unregistered.nb_buf_ref = 0;
2872 
2873  if (s->sei.timecode.present) {
2874  uint32_t *tc_sd;
2875  char tcbuf[AV_TIMECODE_STR_SIZE];
2876  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2877  sizeof(uint32_t) * 4);
2878  if (!tcside)
2879  return AVERROR(ENOMEM);
2880 
2881  tc_sd = (uint32_t*)tcside->data;
2882  tc_sd[0] = s->sei.timecode.num_clock_ts;
2883 
2884  for (int i = 0; i < tc_sd[0]; i++) {
2885  int drop = s->sei.timecode.cnt_dropped_flag[i];
2886  int hh = s->sei.timecode.hours_value[i];
2887  int mm = s->sei.timecode.minutes_value[i];
2888  int ss = s->sei.timecode.seconds_value[i];
2889  int ff = s->sei.timecode.n_frames[i];
2890 
2891  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2892  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2893  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2894  }
2895 
2896  s->sei.timecode.num_clock_ts = 0;
2897  }
2898 
2899  if (s->sei.film_grain_characteristics.present) {
2900  HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
2901  AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
2902  if (!fgp)
2903  return AVERROR(ENOMEM);
2904 
2905  fgp->type = AV_FILM_GRAIN_PARAMS_H274;
2906  fgp->seed = s->ref->poc; /* no poc_offset in HEVC */
2907 
2908  fgp->codec.h274.model_id = fgc->model_id;
2909  if (fgc->separate_colour_description_present_flag) {
2910  fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
2911  fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
2912  fgp->codec.h274.color_range = fgc->full_range + 1;
2913  fgp->codec.h274.color_primaries = fgc->color_primaries;
2914  fgp->codec.h274.color_trc = fgc->transfer_characteristics;
2915  fgp->codec.h274.color_space = fgc->matrix_coeffs;
2916  } else {
2917  const HEVCSPS *sps = s->ps.sps;
2918  const VUI *vui = &sps->vui;
2919  fgp->codec.h274.bit_depth_luma = sps->bit_depth;
2920  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
2921  if (vui->video_signal_type_present_flag)
2922  fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
2923  else
2924  fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
2925  if (vui->colour_description_present_flag) {
2926  fgp->codec.h274.color_primaries = vui->colour_primaries;
2927  fgp->codec.h274.color_trc = vui->transfer_characteristic;
2928  fgp->codec.h274.color_space = vui->matrix_coeffs;
2929  } else {
2930  fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
2931  fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
2932  fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
2933  }
2934  }
2935  fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
2936  fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
2937 
2938  memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
2939  sizeof(fgp->codec.h274.component_model_present));
2940  memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
2941  sizeof(fgp->codec.h274.num_intensity_intervals));
2942  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
2943  sizeof(fgp->codec.h274.num_model_values));
2944  memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
2945  sizeof(fgp->codec.h274.intensity_interval_lower_bound));
2946  memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
2947  sizeof(fgp->codec.h274.intensity_interval_upper_bound));
2948  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
2949  sizeof(fgp->codec.h274.comp_model_value));
2950 
2951  fgc->present = fgc->persistence_flag;
2952  }
2953 
2954  if (s->sei.dynamic_hdr_plus.info) {
2955  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2956  if (!info_ref)
2957  return AVERROR(ENOMEM);
2958 
2959  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2960  av_buffer_unref(&info_ref);
2961  return AVERROR(ENOMEM);
2962  }
2963  }
2964 
2965  if (s->rpu_buf) {
2966  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2967  if (!rpu)
2968  return AVERROR(ENOMEM);
2969 
2970  s->rpu_buf = NULL;
2971  }
2972 
2973  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2974  return ret;
2975 
2976  if (s->sei.dynamic_hdr_vivid.info) {
2977  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_vivid.info);
2978  if (!info_ref)
2979  return AVERROR(ENOMEM);
2980 
2981  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
2982  av_buffer_unref(&info_ref);
2983  return AVERROR(ENOMEM);
2984  }
2985  }
2986 
2987  return 0;
2988 }
2989 
2991 {
2992  HEVCLocalContext *lc = s->HEVClc;
2993  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2994  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2995  int ret;
2996 
2997  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2998  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2999  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
3000  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
3001  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
3002 
3003  s->is_decoded = 0;
3004  s->first_nal_type = s->nal_unit_type;
3005 
3006  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
3007 
3008  if (s->ps.pps->tiles_enabled_flag)
3009  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
3010 
3011  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
3012  if (ret < 0)
3013  goto fail;
3014 
3015  ret = ff_hevc_frame_rps(s);
3016  if (ret < 0) {
3017  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
3018  goto fail;
3019  }
3020 
3021  s->ref->frame->key_frame = IS_IRAP(s);
3022 
3023  s->ref->needs_fg = s->sei.film_grain_characteristics.present &&
3024  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
3025  !s->avctx->hwaccel;
3026 
3027  if (s->ref->needs_fg) {
3028  s->ref->frame_grain->format = s->ref->frame->format;
3029  s->ref->frame_grain->width = s->ref->frame->width;
3030  s->ref->frame_grain->height = s->ref->frame->height;
3031  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
3032  goto fail;
3033  }
3034 
3035  ret = set_side_data(s);
3036  if (ret < 0)
3037  goto fail;
3038 
3039  s->frame->pict_type = 3 - s->sh.slice_type;
3040 
3041  if (!IS_IRAP(s))
3042  ff_hevc_bump_frame(s);
3043 
3044  av_frame_unref(s->output_frame);
3045  ret = ff_hevc_output_frame(s, s->output_frame, 0);
3046  if (ret < 0)
3047  goto fail;
3048 
3049  if (!s->avctx->hwaccel)
3050  ff_thread_finish_setup(s->avctx);
3051 
3052  return 0;
3053 
3054 fail:
3055  if (s->ref)
3056  ff_hevc_unref_frame(s, s->ref, ~0);
3057  s->ref = NULL;
3058  return ret;
3059 }
3060 
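/* Finalize the current picture; if film grain was requested, synthesize it
 * into the duplicate frame_grain buffer. */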
3061 static int hevc_frame_end(HEVCContext *s)
3062 {
3063  HEVCFrame *out = s->ref;
3064  const AVFrameSideData *sd;
3065  int ret;
3066 
3067  if (out->needs_fg) {
3068  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
3069  av_assert0(out->frame_grain->buf[0] && sd);
3070  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
3071  (AVFilmGrainParams *) sd->data);
3072 
3073  if (ret < 0) {
3074  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
3075  "grain, ignoring: %s\n", av_err2str(ret));
3076  out->needs_fg = 0;
3077  }
3078  }
3079 
3080  return 0;
3081 }
3082 
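/* Dispatch a single NAL unit: parameter sets and SEI go to their parsers,
 * while VCL NAL units run the slice header and slice data decoding
 * (hardware or software) path. */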
3083 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
3084 {
3085  HEVCLocalContext *lc = s->HEVClc;
3086  GetBitContext *gb = &lc->gb;
3087  int ctb_addr_ts, ret;
3088 
3089  *gb = nal->gb;
3090  s->nal_unit_type = nal->type;
3091  s->temporal_id = nal->temporal_id;
3092 
3093  switch (s->nal_unit_type) {
3094  case HEVC_NAL_VPS:
3095  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3096  ret = s->avctx->hwaccel->decode_params(s->avctx,
3097  nal->type,
3098  nal->raw_data,
3099  nal->raw_size);
3100  if (ret < 0)
3101  goto fail;
3102  }
3103  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
3104  if (ret < 0)
3105  goto fail;
3106  break;
3107  case HEVC_NAL_SPS:
3108  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3109  ret = s->avctx->hwaccel->decode_params(s->avctx,
3110  nal->type,
3111  nal->raw_data,
3112  nal->raw_size);
3113  if (ret < 0)
3114  goto fail;
3115  }
3116  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3117  s->apply_defdispwin);
3118  if (ret < 0)
3119  goto fail;
3120  break;
3121  case HEVC_NAL_PPS:
3122  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3123  ret = s->avctx->hwaccel->decode_params(s->avctx,
3124  nal->type,
3125  nal->raw_data,
3126  nal->raw_size);
3127  if (ret < 0)
3128  goto fail;
3129  }
3130  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3131  if (ret < 0)
3132  goto fail;
3133  break;
3134  case HEVC_NAL_SEI_PREFIX:
3135  case HEVC_NAL_SEI_SUFFIX:
3136  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3137  ret = s->avctx->hwaccel->decode_params(s->avctx,
3138  nal->type,
3139  nal->raw_data,
3140  nal->raw_size);
3141  if (ret < 0)
3142  goto fail;
3143  }
3144  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3145  if (ret < 0)
3146  goto fail;
3147  break;
3148  case HEVC_NAL_TRAIL_R:
3149  case HEVC_NAL_TRAIL_N:
3150  case HEVC_NAL_TSA_N:
3151  case HEVC_NAL_TSA_R:
3152  case HEVC_NAL_STSA_N:
3153  case HEVC_NAL_STSA_R:
3154  case HEVC_NAL_BLA_W_LP:
3155  case HEVC_NAL_BLA_W_RADL:
3156  case HEVC_NAL_BLA_N_LP:
3157  case HEVC_NAL_IDR_W_RADL:
3158  case HEVC_NAL_IDR_N_LP:
3159  case HEVC_NAL_CRA_NUT:
3160  case HEVC_NAL_RADL_N:
3161  case HEVC_NAL_RADL_R:
3162  case HEVC_NAL_RASL_N:
3163  case HEVC_NAL_RASL_R:
3164  ret = hls_slice_header(s);
3165  if (ret < 0)
3166  return ret;
3167  if (ret == 1) {
3168  ret = AVERROR_INVALIDDATA;
3169  goto fail;
3170  }
3171 
3172 
3173  if (
3174  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3175  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3176  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3177  break;
3178  }
3179 
3180  if (s->sh.first_slice_in_pic_flag) {
3181  if (s->max_ra == INT_MAX) {
3182  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3183  s->max_ra = s->poc;
3184  } else {
3185  if (IS_IDR(s))
3186  s->max_ra = INT_MIN;
3187  }
3188  }
3189 
3190  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3191  s->poc <= s->max_ra) {
3192  s->is_decoded = 0;
3193  break;
3194  } else {
3195  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3196  s->max_ra = INT_MIN;
3197  }
3198 
3199  s->overlap ++;
3200  ret = hevc_frame_start(s);
3201  if (ret < 0)
3202  return ret;
3203  } else if (!s->ref) {
3204  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3205  goto fail;
3206  }
3207 
3208  if (s->nal_unit_type != s->first_nal_type) {
3209  av_log(s->avctx, AV_LOG_ERROR,
3210  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3211  s->first_nal_type, s->nal_unit_type);
3212  return AVERROR_INVALIDDATA;
3213  }
3214 
3215  if (!s->sh.dependent_slice_segment_flag &&
3216  s->sh.slice_type != HEVC_SLICE_I) {
3217  ret = ff_hevc_slice_rpl(s);
3218  if (ret < 0) {
3219  av_log(s->avctx, AV_LOG_WARNING,
3220  "Error constructing the reference lists for the current slice.\n");
3221  goto fail;
3222  }
3223  }
3224 
3225  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3226  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3227  if (ret < 0)
3228  goto fail;
3229  }
3230 
3231  if (s->avctx->hwaccel) {
3232  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3233  if (ret < 0)
3234  goto fail;
3235  } else {
3236  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3237  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3238  else
3239  ctb_addr_ts = hls_slice_data(s);
3240  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3241  ret = hevc_frame_end(s);
3242  if (ret < 0)
3243  goto fail;
3244  s->is_decoded = 1;
3245  }
3246 
3247  if (ctb_addr_ts < 0) {
3248  ret = ctb_addr_ts;
3249  goto fail;
3250  }
3251  }
3252  break;
3253  case HEVC_NAL_EOS_NUT:
3254  case HEVC_NAL_EOB_NUT:
3255  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3256  s->max_ra = INT_MAX;
3257  break;
3258  case HEVC_NAL_AUD:
3259  case HEVC_NAL_FD_NUT:
3260  case HEVC_NAL_UNSPEC62:
3261  break;
3262  default:
3263  av_log(s->avctx, AV_LOG_INFO,
3264  "Skipping NAL unit %d\n", s->nal_unit_type);
3265  }
3266 
3267  return 0;
3268 fail:
3269  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3270  return ret;
3271  return 0;
3272 }
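/*
 * Editor's note: the VPS/SPS/PPS/SEI cases above all forward the raw NAL to the
 * hardware accelerator before parsing it. A minimal sketch of that repeated
 * pattern, factored into a hypothetical helper (not part of hevcdec.c):
 */
static int hwaccel_decode_params(HEVCContext *s, const H2645NAL *nal)
{
    /* no-op when no hwaccel is attached or it does not consume parameter sets */
    if (!s->avctx->hwaccel || !s->avctx->hwaccel->decode_params)
        return 0;
    return s->avctx->hwaccel->decode_params(s->avctx, nal->type,
                                            nal->raw_data, nal->raw_size);
}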
3273 
3274 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3275 {
3276  int i, ret = 0;
3277  int eos_at_start = 1;
3278 
3279  s->ref = NULL;
3280  s->last_eos = s->eos;
3281  s->eos = 0;
3282  s->overlap = 0;
3283 
3284  /* split the input packet into NAL units, so we know the upper bound on the
3285  * number of slices in the frame */
3286  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3287  s->nal_length_size, s->avctx->codec_id, 1, 0);
3288  if (ret < 0) {
3289  av_log(s->avctx, AV_LOG_ERROR,
3290  "Error splitting the input into NAL units.\n");
3291  return ret;
3292  }
3293 
3294  for (i = 0; i < s->pkt.nb_nals; i++) {
3295  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3296  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3297  if (eos_at_start) {
3298  s->last_eos = 1;
3299  } else {
3300  s->eos = 1;
3301  }
3302  } else {
3303  eos_at_start = 0;
3304  }
3305  }
3306 
3307  /*
3308  * Check for RPU delimiter.
3309  *
3310  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3311  *
3312  * We have to do this check here and create the rpu buffer, since RPUs are appended
3313  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3314  */
3315  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3316  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3317  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3318  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3319  if (s->rpu_buf) {
3320  av_buffer_unref(&s->rpu_buf);
3321  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3322  }
3323 
3324  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3325  if (!s->rpu_buf)
3326  return AVERROR(ENOMEM);
3327  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3328 
3329  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3330  if (ret < 0) {
3331  av_buffer_unref(&s->rpu_buf);
3332  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3333  /* ignore */
3334  }
3335  }
3336 
3337  /* decode the NAL units */
3338  for (i = 0; i < s->pkt.nb_nals; i++) {
3339  H2645NAL *nal = &s->pkt.nals[i];
3340 
3341  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3342  (s->avctx->skip_frame >= AVDISCARD_NONREF
3343  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3344  continue;
3345 
3346  ret = decode_nal_unit(s, nal);
3347  if (ret >= 0 && s->overlap > 2)
3348  ret = AVERROR_INVALIDDATA;
3349  if (ret < 0) {
3350  av_log(s->avctx, AV_LOG_WARNING,
3351  "Error parsing NAL unit #%d.\n", i);
3352  goto fail;
3353  }
3354  }
3355 
3356 fail:
3357  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3358  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3359 
3360  return ret;
3361 }
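/*
 * Editor's note: a minimal sketch (not part of hevcdec.c) of the Dolby Vision
 * RPU test applied above to the last NAL of the access unit — unregistered
 * type 62, base layer, temporal id 0, and more than the two header bytes that
 * are skipped before the payload is copied:
 */
static int nal_looks_like_dovi_rpu(const H2645NAL *nal)
{
    return nal->type == HEVC_NAL_UNSPEC62 &&
           nal->size > 2                  &&
           !nal->nuh_layer_id             &&
           !nal->temporal_id;
}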
3362 
3363 static int verify_md5(HEVCContext *s, AVFrame *frame)
3364 {
3365  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3366  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3367  int pixel_shift;
3368  int err = 0;
3369  int i, j;
3370 
3371  if (!desc)
3372  return AVERROR(EINVAL);
3373 
3374  pixel_shift = desc->comp[0].depth > 8;
3375 
3376  /* the checksums are LE, so we have to byteswap for >8bpp formats
3377  * on BE arches */
3378 #if HAVE_BIGENDIAN
3379  if (pixel_shift && !s->checksum_buf) {
3380  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3381  FFMAX3(frame->linesize[0], frame->linesize[1],
3382  frame->linesize[2]));
3383  if (!s->checksum_buf)
3384  return AVERROR(ENOMEM);
3385  }
3386 #endif
3387 
3388  msg_buf[0] = '\0';
3389  for (i = 0; frame->data[i]; i++) {
3390  int width = s->avctx->coded_width;
3391  int height = s->avctx->coded_height;
3392  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3393  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3394  uint8_t md5[16];
3395 
3396  av_md5_init(s->md5_ctx);
3397  for (j = 0; j < h; j++) {
3398  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3399 #if HAVE_BIGENDIAN
3400  if (pixel_shift) {
3401  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3402  (const uint16_t *) src, w);
3403  src = s->checksum_buf;
3404  }
3405 #endif
3406  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3407  }
3408  av_md5_final(s->md5_ctx, md5);
3409 
3410 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3411 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
3412 
3413  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3414  av_strlcatf(msg_buf, sizeof(msg_buf),
3415  "plane %d - correct " MD5_PRI "; ",
3416  i, MD5_PRI_ARG(md5));
3417  } else {
3418  av_strlcatf(msg_buf, sizeof(msg_buf),
3419  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3420  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3421  err = AVERROR_INVALIDDATA;
3422  }
3423  }
3424 
3425  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3426  "Verifying checksum for frame with POC %d: %s\n",
3427  s->poc, msg_buf);
3428 
3429  return err;
3430 }
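/*
 * Editor's note: verify_md5() only runs when the caller opts into checksum
 * verification. A minimal sketch of that opt-in from the API side (helper
 * name is illustrative, not part of hevcdec.c):
 */
static void request_picture_hash_check(AVCodecContext *avctx)
{
    avctx->err_recognition |= AV_EF_CRCCHECK; /* verify SEI picture hashes when present */
    avctx->err_recognition |= AV_EF_EXPLODE;  /* optional: treat a mismatch as a hard error */
}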
3431 
3432 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3433 {
3434  int ret, i;
3435 
3436  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3437  &s->nal_length_size, s->avctx->err_recognition,
3438  s->apply_defdispwin, s->avctx);
3439  if (ret < 0)
3440  return ret;
3441 
3442  /* export stream parameters from the first SPS */
3443  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3444  if (first && s->ps.sps_list[i]) {
3445  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3446  export_stream_params(s, sps);
3447  break;
3448  }
3449  }
3450 
3451  /* export stream parameters from SEI */
3452  ret = export_stream_params_from_sei(s);
3453  if (ret < 0)
3454  return ret;
3455 
3456  return 0;
3457 }
3458 
3459 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3460  int *got_output, AVPacket *avpkt)
3461 {
3462  int ret;
3463  uint8_t *sd;
3464  size_t sd_size;
3465  HEVCContext *s = avctx->priv_data;
3466 
3467  if (!avpkt->size) {
3468  ret = ff_hevc_output_frame(s, rframe, 1);
3469  if (ret < 0)
3470  return ret;
3471 
3472  *got_output = ret;
3473  return 0;
3474  }
3475 
3476  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3477  if (sd && sd_size > 0) {
3478  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3479  if (ret < 0)
3480  return ret;
3481  }
3482 
3483  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3484  if (sd && sd_size > 0)
3485  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3486 
3487  s->ref = NULL;
3488  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3489  if (ret < 0)
3490  return ret;
3491 
3492  if (avctx->hwaccel) {
3493  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3494  av_log(avctx, AV_LOG_ERROR,
3495  "hardware accelerator failed to decode picture\n");
3496  ff_hevc_unref_frame(s, s->ref, ~0);
3497  return ret;
3498  }
3499  } else {
3500  /* verify the SEI checksum */
3501  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3502  s->sei.picture_hash.is_md5) {
3503  ret = verify_md5(s, s->ref->frame);
3504  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3505  ff_hevc_unref_frame(s, s->ref, ~0);
3506  return ret;
3507  }
3508  }
3509  }
3510  s->sei.picture_hash.is_md5 = 0;
3511 
3512  if (s->is_decoded) {
3513  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3514  s->is_decoded = 0;
3515  }
3516 
3517  if (s->output_frame->buf[0]) {
3518  av_frame_move_ref(rframe, s->output_frame);
3519  *got_output = 1;
3520  }
3521 
3522  return avpkt->size;
3523 }
3524 
3525 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3526 {
3527  int ret;
3528 
3529  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3530  if (ret < 0)
3531  return ret;
3532 
3533  if (src->needs_fg) {
3534  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3535  if (ret < 0)
3536  return ret;
3537  dst->needs_fg = 1;
3538  }
3539 
3540  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3541  if (!dst->tab_mvf_buf)
3542  goto fail;
3543  dst->tab_mvf = src->tab_mvf;
3544 
3545  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3546  if (!dst->rpl_tab_buf)
3547  goto fail;
3548  dst->rpl_tab = src->rpl_tab;
3549 
3550  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3551  if (!dst->rpl_buf)
3552  goto fail;
3553 
3554  dst->poc = src->poc;
3555  dst->ctb_count = src->ctb_count;
3556  dst->flags = src->flags;
3557  dst->sequence = src->sequence;
3558 
3559  if (src->hwaccel_picture_private) {
3560  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3561  if (!dst->hwaccel_priv_buf)
3562  goto fail;
3563  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3564  }
3565 
3566  return 0;
3567 fail:
3568  ff_hevc_unref_frame(s, dst, ~0);
3569  return AVERROR(ENOMEM);
3570 }
3571 
3572 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3573 {
3574  HEVCContext *s = avctx->priv_data;
3575  int i;
3576 
3577  pic_arrays_free(s);
3578 
3579  ff_dovi_ctx_unref(&s->dovi_ctx);
3580  av_buffer_unref(&s->rpu_buf);
3581 
3582  av_freep(&s->md5_ctx);
3583 
3584  for (i = 0; i < 3; i++) {
3585  av_freep(&s->sao_pixel_buffer_h[i]);
3586  av_freep(&s->sao_pixel_buffer_v[i]);
3587  }
3588  av_frame_free(&s->output_frame);
3589 
3590  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3591  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3592  av_frame_free(&s->DPB[i].frame);
3593  av_frame_free(&s->DPB[i].frame_grain);
3594  }
3595 
3596  ff_hevc_ps_uninit(&s->ps);
3597 
3598  av_freep(&s->sh.entry_point_offset);
3599  av_freep(&s->sh.offset);
3600  av_freep(&s->sh.size);
3601 
3602  if (s->HEVClcList) {
3603  for (i = 1; i < s->threads_number; i++) {
3604  av_freep(&s->HEVClcList[i]);
3605  }
3606  }
3607  av_freep(&s->HEVClc);
3608  av_freep(&s->HEVClcList);
3609 
3610  ff_h2645_packet_uninit(&s->pkt);
3611 
3612  ff_hevc_reset_sei(&s->sei);
3613 
3614  return 0;
3615 }
3616 
3617 static av_cold int hevc_init_context(AVCodecContext *avctx)
3618 {
3619  HEVCContext *s = avctx->priv_data;
3620  int i;
3621 
3622  s->avctx = avctx;
3623 
3624  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3625  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3626  if (!s->HEVClc || !s->HEVClcList)
3627  return AVERROR(ENOMEM);
3628  s->HEVClc->parent = s;
3629  s->HEVClc->logctx = avctx;
3630  s->HEVClc->common_cabac_state = &s->cabac;
3631  s->HEVClcList[0] = s->HEVClc;
3632 
3633  s->output_frame = av_frame_alloc();
3634  if (!s->output_frame)
3635  return AVERROR(ENOMEM);
3636 
3637  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3638  s->DPB[i].frame = av_frame_alloc();
3639  if (!s->DPB[i].frame)
3640  return AVERROR(ENOMEM);
3641  s->DPB[i].tf.f = s->DPB[i].frame;
3642 
3643  s->DPB[i].frame_grain = av_frame_alloc();
3644  if (!s->DPB[i].frame_grain)
3645  return AVERROR(ENOMEM);
3646  }
3647 
3648  s->max_ra = INT_MAX;
3649 
3650  s->md5_ctx = av_md5_alloc();
3651  if (!s->md5_ctx)
3652  return AVERROR(ENOMEM);
3653 
3654  ff_bswapdsp_init(&s->bdsp);
3655 
3656  s->dovi_ctx.logctx = avctx;
3657  s->eos = 0;
3658 
3659  ff_hevc_reset_sei(&s->sei);
3660 
3661  return 0;
3662 }
3663 
3664 #if HAVE_THREADS
3665 static int hevc_update_thread_context(AVCodecContext *dst,
3666  const AVCodecContext *src)
3667 {
3668  HEVCContext *s = dst->priv_data;
3669  HEVCContext *s0 = src->priv_data;
3670  int i, ret;
3671 
3672  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3673  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3674  if (s0->DPB[i].frame->buf[0]) {
3675  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3676  if (ret < 0)
3677  return ret;
3678  }
3679  }
3680 
3681  if (s->ps.sps != s0->ps.sps)
3682  s->ps.sps = NULL;
3683  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3684  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3685  if (ret < 0)
3686  return ret;
3687  }
3688 
3689  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3690  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3691  if (ret < 0)
3692  return ret;
3693  }
3694 
3695  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3696  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3697  if (ret < 0)
3698  return ret;
3699  }
3700 
3701  if (s->ps.sps != s0->ps.sps)
3702  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3703  return ret;
3704 
3705  s->seq_decode = s0->seq_decode;
3706  s->seq_output = s0->seq_output;
3707  s->pocTid0 = s0->pocTid0;
3708  s->max_ra = s0->max_ra;
3709  s->eos = s0->eos;
3710  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3711 
3712  s->is_nalff = s0->is_nalff;
3713  s->nal_length_size = s0->nal_length_size;
3714 
3715  s->threads_number = s0->threads_number;
3716  s->threads_type = s0->threads_type;
3717 
3718  if (s0->eos) {
3719  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3720  s->max_ra = INT_MAX;
3721  }
3722 
3723  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3724  if (ret < 0)
3725  return ret;
3726 
3727  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3728  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3729  s->sei.unregistered.nb_buf_ref = 0;
3730 
3731  if (s0->sei.unregistered.nb_buf_ref) {
3732  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3733  s0->sei.unregistered.nb_buf_ref,
3734  sizeof(*s->sei.unregistered.buf_ref));
3735  if (ret < 0)
3736  return ret;
3737 
3738  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3739  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3740  if (!s->sei.unregistered.buf_ref[i])
3741  return AVERROR(ENOMEM);
3742  s->sei.unregistered.nb_buf_ref++;
3743  }
3744  }
3745 
3746  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3747  if (ret < 0)
3748  return ret;
3749 
3750  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3751  if (ret < 0)
3752  return ret;
3753 
3754  ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3755  if (ret < 0)
3756  return ret;
3757 
3758  ret = av_buffer_replace(&s->sei.dynamic_hdr_vivid.info, s0->sei.dynamic_hdr_vivid.info);
3759  if (ret < 0)
3760  return ret;
3761 
3762  s->sei.frame_packing = s0->sei.frame_packing;
3763  s->sei.display_orientation = s0->sei.display_orientation;
3764  s->sei.mastering_display = s0->sei.mastering_display;
3765  s->sei.content_light = s0->sei.content_light;
3766  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3767 
3768  ret = export_stream_params_from_sei(s);
3769  if (ret < 0)
3770  return ret;
3771 
3772  return 0;
3773 }
3774 #endif
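/*
 * Editor's note: hevc_update_thread_context() above is only exercised when the
 * caller enables frame threading. A minimal sketch of that setup, done before
 * avcodec_open2() (helper name is illustrative, not part of hevcdec.c):
 */
static void request_threading(AVCodecContext *avctx, int count)
{
    avctx->thread_count = count;              /* 0 lets libavcodec choose a count */
    avctx->thread_type  = FF_THREAD_FRAME;    /* or FF_THREAD_SLICE for WPP/tiles */
}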
3775 
3776 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3777 {
3778  HEVCContext *s = avctx->priv_data;
3779  int ret;
3780 
3781  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3782  s->threads_number = avctx->thread_count;
3783  ret = ff_slice_thread_init_progress(avctx);
3784  if (ret < 0)
3785  return ret;
3786  } else
3787  s->threads_number = 1;
3788 
3789  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3790  s->threads_type = FF_THREAD_FRAME;
3791  else
3792  s->threads_type = FF_THREAD_SLICE;
3793 
3794  ret = hevc_init_context(avctx);
3795  if (ret < 0)
3796  return ret;
3797 
3798  s->enable_parallel_tiles = 0;
3799  s->sei.picture_timing.picture_struct = 0;
3800  s->eos = 1;
3801 
3802  atomic_init(&s->wpp_err, 0);
3803 
3804  if (!avctx->internal->is_copy) {
3805  if (avctx->extradata_size > 0 && avctx->extradata) {
3806  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3807  if (ret < 0) {
3808  return ret;
3809  }
3810  }
3811  }
3812 
3813  return 0;
3814 }
3815 
3816 static void hevc_decode_flush(AVCodecContext *avctx)
3817 {
3818  HEVCContext *s = avctx->priv_data;
3819  ff_hevc_flush_dpb(s);
3820  ff_hevc_reset_sei(&s->sei);
3821  ff_dovi_ctx_flush(&s->dovi_ctx);
3822  av_buffer_unref(&s->rpu_buf);
3823  s->max_ra = INT_MAX;
3824  s->eos = 1;
3825 }
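/*
 * Editor's note: hevc_decode_flush() is not called directly; it runs as the
 * decoder's flush callback when the caller resets decoding state, typically
 * around a seek. A minimal sketch (not part of hevcdec.c):
 */
static void reset_after_seek(AVCodecContext *avctx)
{
    avcodec_flush_buffers(avctx); /* drops buffered frames and invokes the decoder's flush */
}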
3826 
3827 #define OFFSET(x) offsetof(HEVCContext, x)
3828 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3829 
3830 static const AVOption options[] = {
3831  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3832  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3833  { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
3834  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3835  { NULL },
3836 };
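/*
 * Editor's note: the private options above can be set by the caller through
 * the AVOption API before the decoder is opened. A minimal sketch (helper
 * name is illustrative, not part of hevcdec.c):
 */
static int request_default_display_window(AVCodecContext *avctx)
{
    /* AV_OPT_SEARCH_CHILDREN lets the lookup reach the decoder's private context */
    return av_opt_set_int(avctx, "apply_defdispwin", 1, AV_OPT_SEARCH_CHILDREN);
}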
3837 
3838 static const AVClass hevc_decoder_class = {
3839  .class_name = "HEVC decoder",
3840  .item_name = av_default_item_name,
3841  .option = options,
3842  .version = LIBAVUTIL_VERSION_INT,
3843 };
3844 
3846  .p.name = "hevc",
3847  .p.long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3848  .p.type = AVMEDIA_TYPE_VIDEO,
3849  .p.id = AV_CODEC_ID_HEVC,
3850  .priv_data_size = sizeof(HEVCContext),
3851  .p.priv_class = &hevc_decoder_class,
3852  .init = hevc_decode_init,
3853  .close = hevc_decode_free,
3854  FF_CODEC_DECODE_CB(hevc_decode_frame),
3855  .flush = hevc_decode_flush,
3856  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3857  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3858  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3859  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3860  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3861  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3862  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3863 #if CONFIG_HEVC_DXVA2_HWACCEL
3864  HWACCEL_DXVA2(hevc),
3865 #endif
3866 #if CONFIG_HEVC_D3D11VA_HWACCEL
3867  HWACCEL_D3D11VA(hevc),
3868 #endif
3869 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3870  HWACCEL_D3D11VA2(hevc),
3871 #endif
3872 #if CONFIG_HEVC_NVDEC_HWACCEL
3873  HWACCEL_NVDEC(hevc),
3874 #endif
3875 #if CONFIG_HEVC_VAAPI_HWACCEL
3876  HWACCEL_VAAPI(hevc),
3877 #endif
3878 #if CONFIG_HEVC_VDPAU_HWACCEL
3879  HWACCEL_VDPAU(hevc),
3880 #endif
3881 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3882  HWACCEL_VIDEOTOOLBOX(hevc),
3883 #endif
3884  NULL
3885  },
3886 };