1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
31 #include "libavutil/display.h"
32 #include "libavutil/film_grain_params.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mastering_display_metadata.h"
35 #include "libavutil/md5.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/timecode.h"
39 
40 #include "bswapdsp.h"
41 #include "bytestream.h"
42 #include "cabac_functions.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "golomb.h"
46 #include "hevc.h"
47 #include "hevc_data.h"
48 #include "hevc_parse.h"
49 #include "hevcdec.h"
50 #include "hwconfig.h"
51 #include "internal.h"
52 #include "profiles.h"
53 #include "thread.h"
54 #include "threadframe.h"
55 
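/* Maps a prediction block width (4..64) to the index of the matching put_hevc_*() DSP function. */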
56 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
57 
58 /**
59  * NOTE: Each function hls_foo corresponds to the function foo in the
60  * specification (HLS stands for High Level Syntax).
61  */
62 
63 /**
64  * Section 5.7
65  */
66 
67 /* free everything allocated by pic_arrays_init() */
68 static void pic_arrays_free(HEVCContext *s)
69 {
70  av_freep(&s->sao);
71  av_freep(&s->deblock);
72 
73  av_freep(&s->skip_flag);
74  av_freep(&s->tab_ct_depth);
75 
76  av_freep(&s->tab_ipm);
77  av_freep(&s->cbf_luma);
78  av_freep(&s->is_pcm);
79 
80  av_freep(&s->qp_y_tab);
81  av_freep(&s->tab_slice_address);
82  av_freep(&s->filter_slice_edges);
83 
84  av_freep(&s->horizontal_bs);
85  av_freep(&s->vertical_bs);
86 
87  av_freep(&s->sh.entry_point_offset);
88  av_freep(&s->sh.size);
89  av_freep(&s->sh.offset);
90 
91  av_buffer_pool_uninit(&s->tab_mvf_pool);
92  av_buffer_pool_uninit(&s->rpl_tab_pool);
93 }
94 
95 /* allocate arrays that depend on frame dimensions */
96 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
97 {
98  int log2_min_cb_size = sps->log2_min_cb_size;
99  int width = sps->width;
100  int height = sps->height;
101  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
102  ((height >> log2_min_cb_size) + 1);
103  int ctb_count = sps->ctb_width * sps->ctb_height;
104  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
105 
106  s->bs_width = (width >> 2) + 1;
107  s->bs_height = (height >> 2) + 1;
108 
109  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
110  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
111  if (!s->sao || !s->deblock)
112  goto fail;
113 
114  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
115  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
116  if (!s->skip_flag || !s->tab_ct_depth)
117  goto fail;
118 
119  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
120  s->tab_ipm = av_mallocz(min_pu_size);
121  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
122  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
123  goto fail;
124 
125  s->filter_slice_edges = av_mallocz(ctb_count);
126  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
127  sizeof(*s->tab_slice_address));
128  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
129  sizeof(*s->qp_y_tab));
130  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
131  goto fail;
132 
133  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
134  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
135  if (!s->horizontal_bs || !s->vertical_bs)
136  goto fail;
137 
138  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
139  av_buffer_allocz);
140  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
141  av_buffer_allocz);
142  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
143  goto fail;
144 
145  return 0;
146 
147 fail:
148  pic_arrays_free(s);
149  return AVERROR(ENOMEM);
150 }
151 
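/* Parse pred_weight_table(): explicit weighted-prediction weights and offsets for list L0 (and L1 for B slices). */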
152 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
153 {
154  int i = 0;
155  int j = 0;
156  uint8_t luma_weight_l0_flag[16];
157  uint8_t chroma_weight_l0_flag[16];
158  uint8_t luma_weight_l1_flag[16];
159  uint8_t chroma_weight_l1_flag[16];
160  int luma_log2_weight_denom;
161 
162  luma_log2_weight_denom = get_ue_golomb_long(gb);
163  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
168  if (s->ps.sps->chroma_format_idc != 0) {
169  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
170  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
171  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
172  return AVERROR_INVALIDDATA;
173  }
174  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
175  }
176 
177  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
178  luma_weight_l0_flag[i] = get_bits1(gb);
179  if (!luma_weight_l0_flag[i]) {
180  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
181  s->sh.luma_offset_l0[i] = 0;
182  }
183  }
184  if (s->ps.sps->chroma_format_idc != 0) {
185  for (i = 0; i < s->sh.nb_refs[L0]; i++)
186  chroma_weight_l0_flag[i] = get_bits1(gb);
187  } else {
188  for (i = 0; i < s->sh.nb_refs[L0]; i++)
189  chroma_weight_l0_flag[i] = 0;
190  }
191  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
192  if (luma_weight_l0_flag[i]) {
193  int delta_luma_weight_l0 = get_se_golomb(gb);
194  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
195  return AVERROR_INVALIDDATA;
196  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
197  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
198  }
199  if (chroma_weight_l0_flag[i]) {
200  for (j = 0; j < 2; j++) {
201  int delta_chroma_weight_l0 = get_se_golomb(gb);
202  int delta_chroma_offset_l0 = get_se_golomb(gb);
203 
204  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
205  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
206  return AVERROR_INVALIDDATA;
207  }
208 
209  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
210  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
211  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
212  }
213  } else {
214  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
215  s->sh.chroma_offset_l0[i][0] = 0;
216  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
217  s->sh.chroma_offset_l0[i][1] = 0;
218  }
219  }
220  if (s->sh.slice_type == HEVC_SLICE_B) {
221  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
222  luma_weight_l1_flag[i] = get_bits1(gb);
223  if (!luma_weight_l1_flag[i]) {
224  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
225  s->sh.luma_offset_l1[i] = 0;
226  }
227  }
228  if (s->ps.sps->chroma_format_idc != 0) {
229  for (i = 0; i < s->sh.nb_refs[L1]; i++)
230  chroma_weight_l1_flag[i] = get_bits1(gb);
231  } else {
232  for (i = 0; i < s->sh.nb_refs[L1]; i++)
233  chroma_weight_l1_flag[i] = 0;
234  }
235  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
236  if (luma_weight_l1_flag[i]) {
237  int delta_luma_weight_l1 = get_se_golomb(gb);
238  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
239  return AVERROR_INVALIDDATA;
240  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
241  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
242  }
243  if (chroma_weight_l1_flag[i]) {
244  for (j = 0; j < 2; j++) {
245  int delta_chroma_weight_l1 = get_se_golomb(gb);
246  int delta_chroma_offset_l1 = get_se_golomb(gb);
247 
248  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
249  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
250  return AVERROR_INVALIDDATA;
251  }
252 
253  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
254  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
255  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
256  }
257  } else {
258  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
259  s->sh.chroma_offset_l1[i][0] = 0;
260  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
261  s->sh.chroma_offset_l1[i][1] = 0;
262  }
263  }
264  }
265  return 0;
266 }
267 
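/* Parse the long-term reference picture set of the current slice, merging entries signalled in the SPS with the ones coded explicitly in the slice header. */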
268 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
269 {
270  const HEVCSPS *sps = s->ps.sps;
271  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
272  int prev_delta_msb = 0;
273  unsigned int nb_sps = 0, nb_sh;
274  int i;
275 
276  rps->nb_refs = 0;
277  if (!sps->long_term_ref_pics_present_flag)
278  return 0;
279 
280  if (sps->num_long_term_ref_pics_sps > 0)
281  nb_sps = get_ue_golomb_long(gb);
282  nb_sh = get_ue_golomb_long(gb);
283 
284  if (nb_sps > sps->num_long_term_ref_pics_sps)
285  return AVERROR_INVALIDDATA;
286  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
287  return AVERROR_INVALIDDATA;
288 
289  rps->nb_refs = nb_sh + nb_sps;
290 
291  for (i = 0; i < rps->nb_refs; i++) {
292 
293  if (i < nb_sps) {
294  uint8_t lt_idx_sps = 0;
295 
296  if (sps->num_long_term_ref_pics_sps > 1)
297  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
298 
299  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
300  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
301  } else {
302  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
303  rps->used[i] = get_bits1(gb);
304  }
305 
306  rps->poc_msb_present[i] = get_bits1(gb);
307  if (rps->poc_msb_present[i]) {
308  int64_t delta = get_ue_golomb_long(gb);
309  int64_t poc;
310 
311  if (i && i != nb_sps)
312  delta += prev_delta_msb;
313 
314  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
315  if (poc != (int32_t)poc)
316  return AVERROR_INVALIDDATA;
317  rps->poc[i] = poc;
318  prev_delta_msb = delta;
319  }
320  }
321 
322  return 0;
323 }
324 
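/* Export stream-level parameters from the SPS/VPS/VUI (dimensions, profile/level, color description, framerate) to the AVCodecContext. */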
325 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
326 {
327  AVCodecContext *avctx = s->avctx;
328  const HEVCParamSets *ps = &s->ps;
329  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
330  const HEVCWindow *ow = &sps->output_window;
331  unsigned int num = 0, den = 0;
332 
333  avctx->pix_fmt = sps->pix_fmt;
334  avctx->coded_width = sps->width;
335  avctx->coded_height = sps->height;
336  avctx->width = sps->width - ow->left_offset - ow->right_offset;
337  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
338  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
339  avctx->profile = sps->ptl.general_ptl.profile_idc;
340  avctx->level = sps->ptl.general_ptl.level_idc;
341 
342  ff_set_sar(avctx, sps->vui.common.sar);
343 
344  if (sps->vui.common.video_signal_type_present_flag)
345  avctx->color_range = sps->vui.common.video_full_range_flag ? AVCOL_RANGE_JPEG
346  : AVCOL_RANGE_MPEG;
347  else
348  avctx->color_range = AVCOL_RANGE_MPEG;
349 
350  if (sps->vui.common.colour_description_present_flag) {
351  avctx->color_primaries = sps->vui.common.colour_primaries;
352  avctx->color_trc = sps->vui.common.transfer_characteristics;
353  avctx->colorspace = sps->vui.common.matrix_coeffs;
354  } else {
355  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
356  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
357  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
358  }
359 
360  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
361  if (sps->chroma_format_idc == 1) {
362  if (sps->vui.common.chroma_loc_info_present_flag) {
363  if (sps->vui.common.chroma_sample_loc_type_top_field <= 5)
364  avctx->chroma_sample_location = sps->vui.common.chroma_sample_loc_type_top_field + 1;
365  } else
366  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
367  }
368 
369  if (vps->vps_timing_info_present_flag) {
370  num = vps->vps_num_units_in_tick;
371  den = vps->vps_time_scale;
372  } else if (sps->vui.vui_timing_info_present_flag) {
373  num = sps->vui.vui_num_units_in_tick;
374  den = sps->vui.vui_time_scale;
375  }
376 
377  if (num != 0 && den != 0)
378  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
379  num, den, 1 << 30);
380 }
381 
382 static int export_stream_params_from_sei(HEVCContext *s)
383 {
384  AVCodecContext *avctx = s->avctx;
385 
386  if (s->sei.common.a53_caption.buf_ref)
387  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
388 
389  if (s->sei.common.alternative_transfer.present &&
390  av_color_transfer_name(s->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
391  s->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
392  avctx->color_trc = s->sei.common.alternative_transfer.preferred_transfer_characteristics;
393  }
394 
395  if (s->sei.common.film_grain_characteristics.present)
396  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
397 
398  return 0;
399 }
400 
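/* Build the list of candidate output pixel formats (hwaccel formats first, the native software format last) and let ff_thread_get_format() pick one. */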
401 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
402 {
403 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
404  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
405  CONFIG_HEVC_NVDEC_HWACCEL + \
406  CONFIG_HEVC_VAAPI_HWACCEL + \
407  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
408  CONFIG_HEVC_VDPAU_HWACCEL)
409  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
410 
411  switch (sps->pix_fmt) {
412  case AV_PIX_FMT_YUV420P:
413  case AV_PIX_FMT_YUVJ420P:
414 #if CONFIG_HEVC_DXVA2_HWACCEL
415  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
416 #endif
417 #if CONFIG_HEVC_D3D11VA_HWACCEL
418  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
419  *fmt++ = AV_PIX_FMT_D3D11;
420 #endif
421 #if CONFIG_HEVC_VAAPI_HWACCEL
422  *fmt++ = AV_PIX_FMT_VAAPI;
423 #endif
424 #if CONFIG_HEVC_VDPAU_HWACCEL
425  *fmt++ = AV_PIX_FMT_VDPAU;
426 #endif
427 #if CONFIG_HEVC_NVDEC_HWACCEL
428  *fmt++ = AV_PIX_FMT_CUDA;
429 #endif
430 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
431  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
432 #endif
433  break;
434  case AV_PIX_FMT_YUV420P10:
435 #if CONFIG_HEVC_DXVA2_HWACCEL
436  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
437 #endif
438 #if CONFIG_HEVC_D3D11VA_HWACCEL
439  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
440  *fmt++ = AV_PIX_FMT_D3D11;
441 #endif
442 #if CONFIG_HEVC_VAAPI_HWACCEL
443  *fmt++ = AV_PIX_FMT_VAAPI;
444 #endif
445 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
446  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
447 #endif
448 #if CONFIG_HEVC_VDPAU_HWACCEL
449  *fmt++ = AV_PIX_FMT_VDPAU;
450 #endif
451 #if CONFIG_HEVC_NVDEC_HWACCEL
452  *fmt++ = AV_PIX_FMT_CUDA;
453 #endif
454  break;
455  case AV_PIX_FMT_YUV444P:
456 #if CONFIG_HEVC_VAAPI_HWACCEL
457  *fmt++ = AV_PIX_FMT_VAAPI;
458 #endif
459 #if CONFIG_HEVC_VDPAU_HWACCEL
460  *fmt++ = AV_PIX_FMT_VDPAU;
461 #endif
462 #if CONFIG_HEVC_NVDEC_HWACCEL
463  *fmt++ = AV_PIX_FMT_CUDA;
464 #endif
465 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
466  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
467 #endif
468  break;
469  case AV_PIX_FMT_YUV422P:
470  case AV_PIX_FMT_YUV422P10LE:
471 #if CONFIG_HEVC_VAAPI_HWACCEL
472  *fmt++ = AV_PIX_FMT_VAAPI;
473 #endif
474 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
475  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
476 #endif
477  break;
478  case AV_PIX_FMT_YUV444P10:
479 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
480  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
481 #endif
482  case AV_PIX_FMT_YUV420P12:
483  case AV_PIX_FMT_YUV444P12:
484 #if CONFIG_HEVC_VAAPI_HWACCEL
485  *fmt++ = AV_PIX_FMT_VAAPI;
486 #endif
487 #if CONFIG_HEVC_VDPAU_HWACCEL
488  *fmt++ = AV_PIX_FMT_VDPAU;
489 #endif
490 #if CONFIG_HEVC_NVDEC_HWACCEL
491  *fmt++ = AV_PIX_FMT_CUDA;
492 #endif
493  break;
494  case AV_PIX_FMT_YUV422P12:
495 #if CONFIG_HEVC_VAAPI_HWACCEL
496  *fmt++ = AV_PIX_FMT_VAAPI;
497 #endif
498  break;
499  }
500 
501  *fmt++ = sps->pix_fmt;
502  *fmt = AV_PIX_FMT_NONE;
503 
504  return ff_thread_get_format(s->avctx, pix_fmts);
505 }
506 
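/* Switch to a new active SPS: reallocate the frame-size dependent arrays, reinitialize the DSP contexts and the SAO line buffers. */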
507 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
508  enum AVPixelFormat pix_fmt)
509 {
510  int ret, i;
511 
512  pic_arrays_free(s);
513  s->ps.sps = NULL;
514  s->ps.vps = NULL;
515 
516  if (!sps)
517  return 0;
518 
519  ret = pic_arrays_init(s, sps);
520  if (ret < 0)
521  goto fail;
522 
523  export_stream_params(s, sps);
524 
525  s->avctx->pix_fmt = pix_fmt;
526 
527  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
528  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
529  ff_videodsp_init (&s->vdsp, sps->bit_depth);
530 
531  for (i = 0; i < 3; i++) {
532  av_freep(&s->sao_pixel_buffer_h[i]);
533  av_freep(&s->sao_pixel_buffer_v[i]);
534  }
535 
536  if (sps->sao_enabled && !s->avctx->hwaccel) {
537  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
538  int c_idx;
539 
540  for(c_idx = 0; c_idx < c_count; c_idx++) {
541  int w = sps->width >> sps->hshift[c_idx];
542  int h = sps->height >> sps->vshift[c_idx];
543  s->sao_pixel_buffer_h[c_idx] =
544  av_malloc((w * 2 * sps->ctb_height) <<
545  sps->pixel_shift);
546  s->sao_pixel_buffer_v[c_idx] =
547  av_malloc((h * 2 * sps->ctb_width) <<
548  sps->pixel_shift);
549  if (!s->sao_pixel_buffer_h[c_idx] ||
550  !s->sao_pixel_buffer_v[c_idx])
551  goto fail;
552  }
553  }
554 
555  s->ps.sps = sps;
556  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
557 
558  return 0;
559 
560 fail:
561  pic_arrays_free(s);
562  for (i = 0; i < 3; i++) {
563  av_freep(&s->sao_pixel_buffer_h[i]);
564  av_freep(&s->sao_pixel_buffer_v[i]);
565  }
566  s->ps.sps = NULL;
567  return ret;
568 }
569 
571 {
572  GetBitContext *gb = &s->HEVClc->gb;
573  SliceHeader *sh = &s->sh;
574  int i, ret;
575 
576  // Coded parameters
577  sh->first_slice_in_pic_flag = get_bits1(gb);
578  if (s->ref && sh->first_slice_in_pic_flag) {
579  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
580  return 1; // This slice will be skipped later, do not corrupt state
581  }
582 
583  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
584  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
585  s->max_ra = INT_MAX;
586  if (IS_IDR(s))
587  ff_hevc_clear_refs(s);
588  }
589  sh->no_output_of_prior_pics_flag = 0;
590  if (IS_IRAP(s))
591  sh->no_output_of_prior_pics_flag = get_bits1(gb);
592 
593  sh->pps_id = get_ue_golomb_long(gb);
594  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
595  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
596  return AVERROR_INVALIDDATA;
597  }
598  if (!sh->first_slice_in_pic_flag &&
599  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
600  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
601  return AVERROR_INVALIDDATA;
602  }
603  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
604  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
605  sh->no_output_of_prior_pics_flag = 1;
606 
607  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
608  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
609  enum AVPixelFormat pix_fmt;
610 
611  ff_hevc_clear_refs(s);
612 
613  ret = set_sps(s, sps, sps->pix_fmt);
614  if (ret < 0)
615  return ret;
616 
617  pix_fmt = get_format(s, sps);
618  if (pix_fmt < 0)
619  return pix_fmt;
620  s->avctx->pix_fmt = pix_fmt;
621 
622  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
623  s->max_ra = INT_MAX;
624  }
625 
626  ret = export_stream_params_from_sei(s);
627  if (ret < 0)
628  return ret;
629 
630  sh->dependent_slice_segment_flag = 0;
631  if (!sh->first_slice_in_pic_flag) {
632  int slice_address_length;
633 
634  if (s->ps.pps->dependent_slice_segments_enabled_flag)
635  sh->dependent_slice_segment_flag = get_bits1(gb);
636 
637  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
638  s->ps.sps->ctb_height);
639  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
640  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
641  av_log(s->avctx, AV_LOG_ERROR,
642  "Invalid slice segment address: %u.\n",
643  sh->slice_segment_addr);
644  return AVERROR_INVALIDDATA;
645  }
646 
647  if (!sh->dependent_slice_segment_flag) {
648  sh->slice_addr = sh->slice_segment_addr;
649  s->slice_idx++;
650  }
651  } else {
652  sh->slice_segment_addr = sh->slice_addr = 0;
653  s->slice_idx = 0;
654  s->slice_initialized = 0;
655  }
656 
657  if (!sh->dependent_slice_segment_flag) {
658  s->slice_initialized = 0;
659 
660  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
661  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
662 
663  sh->slice_type = get_ue_golomb_long(gb);
664  if (!(sh->slice_type == HEVC_SLICE_I ||
665  sh->slice_type == HEVC_SLICE_P ||
666  sh->slice_type == HEVC_SLICE_B)) {
667  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
668  sh->slice_type);
669  return AVERROR_INVALIDDATA;
670  }
671  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I &&
672  !s->ps.pps->pps_curr_pic_ref_enabled_flag) {
673  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
674  return AVERROR_INVALIDDATA;
675  }
676 
677  // when flag is not present, picture is inferred to be output
678  sh->pic_output_flag = 1;
679  if (s->ps.pps->output_flag_present_flag)
680  sh->pic_output_flag = get_bits1(gb);
681 
682  if (s->ps.sps->separate_colour_plane_flag)
683  sh->colour_plane_id = get_bits(gb, 2);
684 
685  if (!IS_IDR(s)) {
686  int poc, pos;
687 
688  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
689  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
690  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
691  av_log(s->avctx, AV_LOG_WARNING,
692  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
693  if (s->avctx->err_recognition & AV_EF_EXPLODE)
694  return AVERROR_INVALIDDATA;
695  poc = s->poc;
696  }
697  s->poc = poc;
698 
699  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
700  pos = get_bits_left(gb);
701  if (!sh->short_term_ref_pic_set_sps_flag) {
702  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
703  if (ret < 0)
704  return ret;
705 
706  sh->short_term_rps = &sh->slice_rps;
707  } else {
708  int numbits, rps_idx;
709 
710  if (!s->ps.sps->nb_st_rps) {
711  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
712  return AVERROR_INVALIDDATA;
713  }
714 
715  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
716  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
717  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
718  }
719  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
720 
721  pos = get_bits_left(gb);
722  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
723  if (ret < 0) {
724  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
725  if (s->avctx->err_recognition & AV_EF_EXPLODE)
726  return AVERROR_INVALIDDATA;
727  }
728  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
729 
730  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
731  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
732  else
733  sh->slice_temporal_mvp_enabled_flag = 0;
734  } else {
735  s->sh.short_term_rps = NULL;
736  s->poc = 0;
737  }
738 
739  /* 8.3.1 */
740  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
741  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
742  s->nal_unit_type != HEVC_NAL_TSA_N &&
743  s->nal_unit_type != HEVC_NAL_STSA_N &&
744  s->nal_unit_type != HEVC_NAL_RADL_N &&
745  s->nal_unit_type != HEVC_NAL_RADL_R &&
746  s->nal_unit_type != HEVC_NAL_RASL_N &&
747  s->nal_unit_type != HEVC_NAL_RASL_R)
748  s->pocTid0 = s->poc;
749 
750  if (s->ps.sps->sao_enabled) {
751  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
752  if (s->ps.sps->chroma_format_idc) {
753  sh->slice_sample_adaptive_offset_flag[1] =
754  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
755  }
756  } else {
757  sh->slice_sample_adaptive_offset_flag[0] = 0;
758  sh->slice_sample_adaptive_offset_flag[1] = 0;
759  sh->slice_sample_adaptive_offset_flag[2] = 0;
760  }
761 
762  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
763  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
764  int nb_refs;
765 
766  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
767  if (sh->slice_type == HEVC_SLICE_B)
768  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
769 
770  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
771  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
772  if (sh->slice_type == HEVC_SLICE_B)
773  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
774  }
775  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
776  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
777  sh->nb_refs[L0], sh->nb_refs[L1]);
778  return AVERROR_INVALIDDATA;
779  }
780 
781  sh->rpl_modification_flag[0] = 0;
782  sh->rpl_modification_flag[1] = 0;
783  nb_refs = ff_hevc_frame_nb_refs(s);
784  if (!nb_refs) {
785  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
786  return AVERROR_INVALIDDATA;
787  }
788 
789  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
790  sh->rpl_modification_flag[0] = get_bits1(gb);
791  if (sh->rpl_modification_flag[0]) {
792  for (i = 0; i < sh->nb_refs[L0]; i++)
793  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
794  }
795 
796  if (sh->slice_type == HEVC_SLICE_B) {
797  sh->rpl_modification_flag[1] = get_bits1(gb);
798  if (sh->rpl_modification_flag[1] == 1)
799  for (i = 0; i < sh->nb_refs[L1]; i++)
800  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
801  }
802  }
803 
804  if (sh->slice_type == HEVC_SLICE_B)
805  sh->mvd_l1_zero_flag = get_bits1(gb);
806 
807  if (s->ps.pps->cabac_init_present_flag)
808  sh->cabac_init_flag = get_bits1(gb);
809  else
810  sh->cabac_init_flag = 0;
811 
812  sh->collocated_ref_idx = 0;
813  if (sh->slice_temporal_mvp_enabled_flag) {
814  sh->collocated_list = L0;
815  if (sh->slice_type == HEVC_SLICE_B)
816  sh->collocated_list = !get_bits1(gb);
817 
818  if (sh->nb_refs[sh->collocated_list] > 1) {
819  sh->collocated_ref_idx = get_ue_golomb_long(gb);
820  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
821  av_log(s->avctx, AV_LOG_ERROR,
822  "Invalid collocated_ref_idx: %d.\n",
823  sh->collocated_ref_idx);
824  return AVERROR_INVALIDDATA;
825  }
826  }
827  }
828 
829  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
830  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
831  int ret = pred_weight_table(s, gb);
832  if (ret < 0)
833  return ret;
834  }
835 
836  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
837  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
838  av_log(s->avctx, AV_LOG_ERROR,
839  "Invalid number of merging MVP candidates: %d.\n",
840  sh->max_num_merge_cand);
841  return AVERROR_INVALIDDATA;
842  }
843 
844  // Syntax in 7.3.6.1
845  if (s->ps.sps->motion_vector_resolution_control_idc == 2)
846  sh->use_integer_mv_flag = get_bits1(gb);
847  else
848  // Inferred to be equal to motion_vector_resolution_control_idc if not present
849  sh->use_integer_mv_flag = s->ps.sps->motion_vector_resolution_control_idc;
850 
851  }
852 
853  sh->slice_qp_delta = get_se_golomb(gb);
854 
855  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
856  sh->slice_cb_qp_offset = get_se_golomb(gb);
857  sh->slice_cr_qp_offset = get_se_golomb(gb);
858  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
859  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
860  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
861  return AVERROR_INVALIDDATA;
862  }
863  } else {
864  sh->slice_cb_qp_offset = 0;
865  sh->slice_cr_qp_offset = 0;
866  }
867 
868  if (s->ps.pps->pps_slice_act_qp_offsets_present_flag) {
869  sh->slice_act_y_qp_offset = get_se_golomb(gb);
870  sh->slice_act_cb_qp_offset = get_se_golomb(gb);
871  sh->slice_act_cr_qp_offset = get_se_golomb(gb);
872  }
873 
874  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
875  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
876  else
877  sh->cu_chroma_qp_offset_enabled_flag = 0;
878 
879  if (s->ps.pps->deblocking_filter_control_present_flag) {
880  int deblocking_filter_override_flag = 0;
881 
882  if (s->ps.pps->deblocking_filter_override_enabled_flag)
883  deblocking_filter_override_flag = get_bits1(gb);
884 
885  if (deblocking_filter_override_flag) {
886  sh->disable_deblocking_filter_flag = get_bits1(gb);
887  if (!sh->disable_deblocking_filter_flag) {
888  int beta_offset_div2 = get_se_golomb(gb);
889  int tc_offset_div2 = get_se_golomb(gb) ;
890  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
891  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
892  av_log(s->avctx, AV_LOG_ERROR,
893  "Invalid deblock filter offsets: %d, %d\n",
894  beta_offset_div2, tc_offset_div2);
895  return AVERROR_INVALIDDATA;
896  }
897  sh->beta_offset = beta_offset_div2 * 2;
898  sh->tc_offset = tc_offset_div2 * 2;
899  }
900  } else {
901  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
902  sh->beta_offset = s->ps.pps->beta_offset;
903  sh->tc_offset = s->ps.pps->tc_offset;
904  }
905  } else {
906  sh->disable_deblocking_filter_flag = 0;
907  sh->beta_offset = 0;
908  sh->tc_offset = 0;
909  }
910 
911  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
912  (sh->slice_sample_adaptive_offset_flag[0] ||
913  sh->slice_sample_adaptive_offset_flag[1] ||
914  !sh->disable_deblocking_filter_flag)) {
915  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
916  } else {
917  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
918  }
919  } else if (!s->slice_initialized) {
920  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
921  return AVERROR_INVALIDDATA;
922  }
923 
924  sh->num_entry_point_offsets = 0;
925  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
926  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
927  // It would be possible to bound this tighter but this here is simpler
928  if (num_entry_point_offsets > get_bits_left(gb)) {
929  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
930  return AVERROR_INVALIDDATA;
931  }
932 
933  sh->num_entry_point_offsets = num_entry_point_offsets;
934  if (sh->num_entry_point_offsets > 0) {
935  int offset_len = get_ue_golomb_long(gb) + 1;
936 
937  if (offset_len < 1 || offset_len > 32) {
938  sh->num_entry_point_offsets = 0;
939  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
940  return AVERROR_INVALIDDATA;
941  }
942 
943  av_freep(&sh->entry_point_offset);
944  av_freep(&sh->offset);
945  av_freep(&sh->size);
946  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
947  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
948  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
949  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
950  sh->num_entry_point_offsets = 0;
951  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
952  return AVERROR(ENOMEM);
953  }
954  for (i = 0; i < sh->num_entry_point_offsets; i++) {
955  unsigned val = get_bits_long(gb, offset_len);
956  sh->entry_point_offset[i] = val + 1; // +1 to get the size
957  }
958  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
959  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
960  s->threads_number = 1;
961  } else
962  s->enable_parallel_tiles = 0;
963  } else
964  s->enable_parallel_tiles = 0;
965  }
966 
967  if (s->ps.pps->slice_header_extension_present_flag) {
968  unsigned int length = get_ue_golomb_long(gb);
969  if (length*8LL > get_bits_left(gb)) {
970  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
971  return AVERROR_INVALIDDATA;
972  }
973  for (i = 0; i < length; i++)
974  skip_bits(gb, 8); // slice_header_extension_data_byte
975  }
976 
977  // Inferred parameters
978  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
979  if (sh->slice_qp > 51 ||
980  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
981  av_log(s->avctx, AV_LOG_ERROR,
982  "The slice_qp %d is outside the valid range "
983  "[%d, 51].\n",
984  sh->slice_qp,
985  -s->ps.sps->qp_bd_offset);
986  return AVERROR_INVALIDDATA;
987  }
988 
989  s->sh.slice_ctb_addr_rs = s->sh.slice_segment_addr;
990 
991  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
992  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
993  return AVERROR_INVALIDDATA;
994  }
995 
996  if (get_bits_left(gb) < 0) {
997  av_log(s->avctx, AV_LOG_ERROR,
998  "Overread slice header by %d bits\n", -get_bits_left(gb));
999  return AVERROR_INVALIDDATA;
1000  }
1001 
1002  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
1003 
1004  if (!s->ps.pps->cu_qp_delta_enabled_flag)
1005  s->HEVClc->qp_y = s->sh.slice_qp;
1006 
1007  s->slice_initialized = 1;
1008  s->HEVClc->tu.cu_qp_offset_cb = 0;
1009  s->HEVClc->tu.cu_qp_offset_cr = 0;
1010 
1011  return 0;
1012 }
1013 
1014 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
1015 
1016 #define SET_SAO(elem, value) \
1017 do { \
1018  if (!sao_merge_up_flag && !sao_merge_left_flag) \
1019  sao->elem = value; \
1020  else if (sao_merge_left_flag) \
1021  sao->elem = CTB(s->sao, rx-1, ry).elem; \
1022  else if (sao_merge_up_flag) \
1023  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1024  else \
1025  sao->elem = 0; \
1026 } while (0)
1027 
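/* Parse the SAO parameters of one CTB; when sao_merge_left_flag/sao_merge_up_flag is set, the parameters are inherited from the neighbouring CTB via SET_SAO(). */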
1028 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1029 {
1030  const HEVCContext *const s = lc->parent;
1031  int sao_merge_left_flag = 0;
1032  int sao_merge_up_flag = 0;
1033  SAOParams *sao = &CTB(s->sao, rx, ry);
1034  int c_idx, i;
1035 
1036  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1037  s->sh.slice_sample_adaptive_offset_flag[1]) {
1038  if (rx > 0) {
1039  if (lc->ctb_left_flag)
1040  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1041  }
1042  if (ry > 0 && !sao_merge_left_flag) {
1043  if (lc->ctb_up_flag)
1044  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1045  }
1046  }
1047 
1048  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1049  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1050  s->ps.pps->log2_sao_offset_scale_chroma;
1051 
1052  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1053  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1054  continue;
1055  }
1056 
1057  if (c_idx == 2) {
1058  sao->type_idx[2] = sao->type_idx[1];
1059  sao->eo_class[2] = sao->eo_class[1];
1060  } else {
1061  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1062  }
1063 
1064  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1065  continue;
1066 
1067  for (i = 0; i < 4; i++)
1068  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1069 
1070  if (sao->type_idx[c_idx] == SAO_BAND) {
1071  for (i = 0; i < 4; i++) {
1072  if (sao->offset_abs[c_idx][i]) {
1073  SET_SAO(offset_sign[c_idx][i],
1074  ff_hevc_sao_offset_sign_decode(lc));
1075  } else {
1076  sao->offset_sign[c_idx][i] = 0;
1077  }
1078  }
1079  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1080  } else if (c_idx != 2) {
1081  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1082  }
1083 
1084  // Inferred parameters
1085  sao->offset_val[c_idx][0] = 0;
1086  for (i = 0; i < 4; i++) {
1087  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1088  if (sao->type_idx[c_idx] == SAO_EDGE) {
1089  if (i > 1)
1090  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1091  } else if (sao->offset_sign[c_idx][i]) {
1092  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1093  }
1094  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1095  }
1096  }
1097 }
1098 
1099 #undef SET_SAO
1100 #undef CTB
1101 
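/* Parse the cross-component prediction scaling factor (res_scale_val) used to predict chroma residuals from the luma residual. */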
1102 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1103 {
1104  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1105 
1106  if (log2_res_scale_abs_plus1 != 0) {
1107  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1108  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1109  (1 - 2 * res_scale_sign_flag);
1110  } else {
1111  lc->tu.res_scale_val = 0;
1112  }
1113 
1114 
1115  return 0;
1116 }
1117 
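/* Decode one transform unit: perform intra prediction for the covered block, parse the optional CU QP delta and chroma QP offsets, then decode the luma and chroma residuals. */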
1118 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1119  int xBase, int yBase, int cb_xBase, int cb_yBase,
1120  int log2_cb_size, int log2_trafo_size,
1121  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1122 {
1123  const HEVCContext *const s = lc->parent;
1124  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1125  int i;
1126 
1127  if (lc->cu.pred_mode == MODE_INTRA) {
1128  int trafo_size = 1 << log2_trafo_size;
1129  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1130 
1131  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1132  }
1133 
1134  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1135  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1136  int scan_idx = SCAN_DIAG;
1137  int scan_idx_c = SCAN_DIAG;
1138  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1139  (s->ps.sps->chroma_format_idc == 2 &&
1140  (cbf_cb[1] || cbf_cr[1]));
1141 
1142  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1143  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1144  if (lc->tu.cu_qp_delta != 0)
1145  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1146  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1147  lc->tu.is_cu_qp_delta_coded = 1;
1148 
1149  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1150  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1151  av_log(s->avctx, AV_LOG_ERROR,
1152  "The cu_qp_delta %d is outside the valid range "
1153  "[%d, %d].\n",
1154  lc->tu.cu_qp_delta,
1155  -(26 + s->ps.sps->qp_bd_offset / 2),
1156  (25 + s->ps.sps->qp_bd_offset / 2));
1157  return AVERROR_INVALIDDATA;
1158  }
1159 
1160  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1161  }
1162 
1163  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1164  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1165  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1166  if (cu_chroma_qp_offset_flag) {
1167  int cu_chroma_qp_offset_idx = 0;
1168  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1169  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1170  av_log(s->avctx, AV_LOG_ERROR,
1171  "cu_chroma_qp_offset_idx not yet tested.\n");
1172  }
1173  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1174  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1175  } else {
1176  lc->tu.cu_qp_offset_cb = 0;
1177  lc->tu.cu_qp_offset_cr = 0;
1178  }
1179  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1180  }
1181 
1182  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1183  if (lc->tu.intra_pred_mode >= 6 &&
1184  lc->tu.intra_pred_mode <= 14) {
1185  scan_idx = SCAN_VERT;
1186  } else if (lc->tu.intra_pred_mode >= 22 &&
1187  lc->tu.intra_pred_mode <= 30) {
1188  scan_idx = SCAN_HORIZ;
1189  }
1190 
1191  if (lc->tu.intra_pred_mode_c >= 6 &&
1192  lc->tu.intra_pred_mode_c <= 14) {
1193  scan_idx_c = SCAN_VERT;
1194  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1195  lc->tu.intra_pred_mode_c <= 30) {
1196  scan_idx_c = SCAN_HORIZ;
1197  }
1198  }
1199 
1200  lc->tu.cross_pf = 0;
1201 
1202  if (cbf_luma)
1203  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1204  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1205  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1206  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1207  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1208  (lc->cu.pred_mode == MODE_INTER ||
1209  (lc->tu.chroma_mode_c == 4)));
1210 
1211  if (lc->tu.cross_pf) {
1212  hls_cross_component_pred(lc, 0);
1213  }
1214  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1215  if (lc->cu.pred_mode == MODE_INTRA) {
1216  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1217  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1218  }
1219  if (cbf_cb[i])
1220  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1221  log2_trafo_size_c, scan_idx_c, 1);
1222  else
1223  if (lc->tu.cross_pf) {
1224  ptrdiff_t stride = s->frame->linesize[1];
1225  int hshift = s->ps.sps->hshift[1];
1226  int vshift = s->ps.sps->vshift[1];
1227  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1228  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1229  int size = 1 << log2_trafo_size_c;
1230 
1231  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1232  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1233  for (i = 0; i < (size * size); i++) {
1234  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1235  }
1236  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1237  }
1238  }
1239 
1240  if (lc->tu.cross_pf) {
1241  hls_cross_component_pred(lc, 1);
1242  }
1243  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1244  if (lc->cu.pred_mode == MODE_INTRA) {
1245  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1246  trafo_size_h, trafo_size_v);
1247  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1248  }
1249  if (cbf_cr[i])
1250  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1251  log2_trafo_size_c, scan_idx_c, 2);
1252  else
1253  if (lc->tu.cross_pf) {
1254  ptrdiff_t stride = s->frame->linesize[2];
1255  int hshift = s->ps.sps->hshift[2];
1256  int vshift = s->ps.sps->vshift[2];
1257  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1258  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1259  int size = 1 << log2_trafo_size_c;
1260 
1261  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1262  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1263  for (i = 0; i < (size * size); i++) {
1264  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1265  }
1266  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1267  }
1268  }
1269  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1270  int trafo_size_h = 1 << (log2_trafo_size + 1);
1271  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1272  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1273  if (lc->cu.pred_mode == MODE_INTRA) {
1274  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1275  trafo_size_h, trafo_size_v);
1276  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1277  }
1278  if (cbf_cb[i])
1279  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1280  log2_trafo_size, scan_idx_c, 1);
1281  }
1282  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1283  if (lc->cu.pred_mode == MODE_INTRA) {
1284  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1285  trafo_size_h, trafo_size_v);
1286  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1287  }
1288  if (cbf_cr[i])
1289  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1290  log2_trafo_size, scan_idx_c, 2);
1291  }
1292  }
1293  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1294  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1295  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1296  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1297  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1298  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1299  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1300  if (s->ps.sps->chroma_format_idc == 2) {
1301  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1302  trafo_size_h, trafo_size_v);
1303  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1304  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1305  }
1306  } else if (blk_idx == 3) {
1307  int trafo_size_h = 1 << (log2_trafo_size + 1);
1308  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1309  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1310  trafo_size_h, trafo_size_v);
1311  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1312  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1313  if (s->ps.sps->chroma_format_idc == 2) {
1314  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1315  trafo_size_h, trafo_size_v);
1316  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1317  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1318  }
1319  }
1320  }
1321 
1322  return 0;
1323 }
1324 
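/* Mark every min-PU position covered by a transquant-bypass (lossless) coding block so the in-loop filters can bypass it. */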
1325 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1326 {
1327  int cb_size = 1 << log2_cb_size;
1328  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1329 
1330  int min_pu_width = s->ps.sps->min_pu_width;
1331  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1332  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1333  int i, j;
1334 
1335  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1336  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1337  s->is_pcm[i + j * min_pu_width] = 2;
1338 }
1339 
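/* Recursively parse the residual quadtree: while split_transform_flag is set, descend into four sub-blocks, otherwise decode the transform unit at this depth. */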
1340 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1341  int xBase, int yBase, int cb_xBase, int cb_yBase,
1342  int log2_cb_size, int log2_trafo_size,
1343  int trafo_depth, int blk_idx,
1344  const int *base_cbf_cb, const int *base_cbf_cr)
1345 {
1346  const HEVCContext *const s = lc->parent;
1347  uint8_t split_transform_flag;
1348  int cbf_cb[2];
1349  int cbf_cr[2];
1350  int ret;
1351 
1352  cbf_cb[0] = base_cbf_cb[0];
1353  cbf_cb[1] = base_cbf_cb[1];
1354  cbf_cr[0] = base_cbf_cr[0];
1355  cbf_cr[1] = base_cbf_cr[1];
1356 
1357  if (lc->cu.intra_split_flag) {
1358  if (trafo_depth == 1) {
1359  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1360  if (s->ps.sps->chroma_format_idc == 3) {
1361  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1362  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1363  } else {
1364  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1365  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1366  }
1367  }
1368  } else {
1369  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1370  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1371  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1372  }
1373 
1374  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1375  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1376  trafo_depth < lc->cu.max_trafo_depth &&
1377  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1378  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1379  } else {
1380  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1381  lc->cu.pred_mode == MODE_INTER &&
1382  lc->cu.part_mode != PART_2Nx2N &&
1383  trafo_depth == 0;
1384 
1385  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1386  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1387  inter_split;
1388  }
1389 
1390  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1391  if (trafo_depth == 0 || cbf_cb[0]) {
1392  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1393  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1394  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1395  }
1396  }
1397 
1398  if (trafo_depth == 0 || cbf_cr[0]) {
1399  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1400  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1401  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1402  }
1403  }
1404  }
1405 
1406  if (split_transform_flag) {
1407  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1408  const int x1 = x0 + trafo_size_split;
1409  const int y1 = y0 + trafo_size_split;
1410 
1411 #define SUBDIVIDE(x, y, idx) \
1412 do { \
1413  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1414  log2_trafo_size - 1, trafo_depth + 1, idx, \
1415  cbf_cb, cbf_cr); \
1416  if (ret < 0) \
1417  return ret; \
1418 } while (0)
1419 
1420  SUBDIVIDE(x0, y0, 0);
1421  SUBDIVIDE(x1, y0, 1);
1422  SUBDIVIDE(x0, y1, 2);
1423  SUBDIVIDE(x1, y1, 3);
1424 
1425 #undef SUBDIVIDE
1426  } else {
1427  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1428  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1429  int min_tu_width = s->ps.sps->min_tb_width;
1430  int cbf_luma = 1;
1431 
1432  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1433  cbf_cb[0] || cbf_cr[0] ||
1434  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1435  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1436  }
1437 
1438  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1439  log2_cb_size, log2_trafo_size,
1440  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1441  if (ret < 0)
1442  return ret;
1443  // TODO: store cbf_luma somewhere else
1444  if (cbf_luma) {
1445  int i, j;
1446  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1447  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1448  int x_tu = (x0 + j) >> log2_min_tu_size;
1449  int y_tu = (y0 + i) >> log2_min_tu_size;
1450  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1451  }
1452  }
1453  if (!s->sh.disable_deblocking_filter_flag) {
1454  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1455  if (s->ps.pps->transquant_bypass_enable_flag &&
1456  lc->cu.cu_transquant_bypass_flag)
1457  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1458  }
1459  }
1460  return 0;
1461 }
1462 
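/* Decode a PCM coding unit: raw luma and chroma samples are read directly from the bitstream and written to the frame, bypassing prediction and transform. */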
1463 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1464 {
1465  const HEVCContext *const s = lc->parent;
1466  GetBitContext gb;
1467  int cb_size = 1 << log2_cb_size;
1468  ptrdiff_t stride0 = s->frame->linesize[0];
1469  ptrdiff_t stride1 = s->frame->linesize[1];
1470  ptrdiff_t stride2 = s->frame->linesize[2];
1471  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1472  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1473  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1474 
1475  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1476  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1477  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1478  s->ps.sps->pcm.bit_depth_chroma;
1479  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1480  int ret;
1481 
1482  if (!s->sh.disable_deblocking_filter_flag)
1483  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1484 
1485  ret = init_get_bits(&gb, pcm, length);
1486  if (ret < 0)
1487  return ret;
1488 
1489  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1490  if (s->ps.sps->chroma_format_idc) {
1491  s->hevcdsp.put_pcm(dst1, stride1,
1492  cb_size >> s->ps.sps->hshift[1],
1493  cb_size >> s->ps.sps->vshift[1],
1494  &gb, s->ps.sps->pcm.bit_depth_chroma);
1495  s->hevcdsp.put_pcm(dst2, stride2,
1496  cb_size >> s->ps.sps->hshift[2],
1497  cb_size >> s->ps.sps->vshift[2],
1498  &gb, s->ps.sps->pcm.bit_depth_chroma);
1499  }
1500 
1501  return 0;
1502 }
1503 
1504 /**
1505  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1506  *
1507  * @param s HEVC decoding context
1508  * @param dst target buffer for block data at block position
1509  * @param dststride stride of the dst buffer
1510  * @param ref reference picture buffer at origin (0, 0)
1511  * @param mv motion vector (relative to block position) to get pixel data from
1512  * @param x_off horizontal position of block from origin (0, 0)
1513  * @param y_off vertical position of block from origin (0, 0)
1514  * @param block_w width of block
1515  * @param block_h height of block
1516  * @param luma_weight weighting factor applied to the luma prediction
1517  * @param luma_offset additive offset applied to the luma prediction value
1518  */
1519 
1520 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1521  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1522  int block_w, int block_h, int luma_weight, int luma_offset)
1523 {
1524  const HEVCContext *const s = lc->parent;
1525  const uint8_t *src = ref->data[0];
1526  ptrdiff_t srcstride = ref->linesize[0];
1527  int pic_width = s->ps.sps->width;
1528  int pic_height = s->ps.sps->height;
1529  int mx = mv->x & 3;
1530  int my = mv->y & 3;
1531  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1532  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1533  int idx = hevc_pel_weight[block_w];
1534 
1535  x_off += mv->x >> 2;
1536  y_off += mv->y >> 2;
1537  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1538 
1539  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1540  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1541  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1542  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1543  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1544  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1545 
1546  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1547  edge_emu_stride, srcstride,
1548  block_w + QPEL_EXTRA,
1549  block_h + QPEL_EXTRA,
1550  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1551  pic_width, pic_height);
1552  src = lc->edge_emu_buffer + buf_offset;
1553  srcstride = edge_emu_stride;
1554  }
1555 
1556  if (!weight_flag)
1557  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1558  block_h, mx, my, block_w);
1559  else
1560  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1561  block_h, s->sh.luma_log2_weight_denom,
1562  luma_weight, luma_offset, mx, my, block_w);
1563 }
1564 
1565 /**
1566  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1567  *
1568  * @param s HEVC decoding context
1569  * @param dst target buffer for block data at block position
1570  * @param dststride stride of the dst buffer
1571  * @param ref0 reference picture0 buffer at origin (0, 0)
1572  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1573  * @param x_off horizontal position of block from origin (0, 0)
1574  * @param y_off vertical position of block from origin (0, 0)
1575  * @param block_w width of block
1576  * @param block_h height of block
1577  * @param ref1 reference picture1 buffer at origin (0, 0)
1578  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1579  * @param current_mv current motion vector structure
1580  */
1581  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1582  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1583  int block_w, int block_h, const AVFrame *ref1,
1584  const Mv *mv1, struct MvField *current_mv)
1585 {
1586  const HEVCContext *const s = lc->parent;
1587  ptrdiff_t src0stride = ref0->linesize[0];
1588  ptrdiff_t src1stride = ref1->linesize[0];
1589  int pic_width = s->ps.sps->width;
1590  int pic_height = s->ps.sps->height;
1591  int mx0 = mv0->x & 3;
1592  int my0 = mv0->y & 3;
1593  int mx1 = mv1->x & 3;
1594  int my1 = mv1->y & 3;
1595  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1596  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1597  int x_off0 = x_off + (mv0->x >> 2);
1598  int y_off0 = y_off + (mv0->y >> 2);
1599  int x_off1 = x_off + (mv1->x >> 2);
1600  int y_off1 = y_off + (mv1->y >> 2);
1601  int idx = hevc_pel_weight[block_w];
1602 
1603  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1604  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1605 
1606  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1607  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1608  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1609  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1610  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1611  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1612 
1613  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1614  edge_emu_stride, src0stride,
1615  block_w + QPEL_EXTRA,
1616  block_h + QPEL_EXTRA,
1617  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1618  pic_width, pic_height);
1619  src0 = lc->edge_emu_buffer + buf_offset;
1620  src0stride = edge_emu_stride;
1621  }
1622 
1623  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1624  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1625  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1626  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1627  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1628  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1629 
1630  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1631  edge_emu_stride, src1stride,
1632  block_w + QPEL_EXTRA,
1633  block_h + QPEL_EXTRA,
1634  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1635  pic_width, pic_height);
1636  src1 = lc->edge_emu_buffer2 + buf_offset;
1637  src1stride = edge_emu_stride;
1638  }
1639 
1640  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1641  block_h, mx0, my0, block_w);
1642  if (!weight_flag)
1643  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1644  block_h, mx1, my1, block_w);
1645  else
1646  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1647  block_h, s->sh.luma_log2_weight_denom,
1648  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1649  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1650  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1651  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1652  mx1, my1, block_w);
1653 
1654 }
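/*
 * Bi-prediction above is done in two passes: put_hevc_qpel[] interpolates the
 * L0 hypothesis into lc->tmp at intermediate precision, then
 * put_hevc_qpel_bi[_w]() interpolates the L1 hypothesis and averages (or,
 * when explicit weighted bi-prediction is enabled, weights and offsets) the
 * two into the destination block.
 */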
1655 
1656 /**
1657  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1658  *
 1659  * @param lc HEVC local (per-thread) decoding context
 1660  * @param dst0 target buffer for block data at block position
 1661  * @param dststride stride of the dst0 buffer
 1662  * @param src0 reference chroma plane buffer at origin (0, 0)
 1663  * @param srcstride stride of the src0 buffer
 1664  * @param reflist reference picture list index (0 for L0, 1 for L1)
 1665  * @param x_off horizontal position of block from origin (0, 0)
 1666  * @param y_off vertical position of block from origin (0, 0)
 1667  * @param block_w width of block
 1668  * @param block_h height of block
 1669  * @param current_mv current motion vector structure
 1670  * @param chroma_weight weighting factor applied to the chroma prediction
 1671  * @param chroma_offset additive offset applied to the chroma prediction value
 1672  */
1673 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1674  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1675  int x_off, int y_off, int block_w, int block_h,
1676  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1677 {
1678  const HEVCContext *const s = lc->parent;
1679  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1680  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1681  const Mv *mv = &current_mv->mv[reflist];
1682  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1683  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1684  int idx = hevc_pel_weight[block_w];
1685  int hshift = s->ps.sps->hshift[1];
1686  int vshift = s->ps.sps->vshift[1];
1687  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1688  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1689  intptr_t _mx = mx << (1 - hshift);
1690  intptr_t _my = my << (1 - vshift);
1691 
1692  x_off += mv->x >> (2 + hshift);
1693  y_off += mv->y >> (2 + vshift);
1694  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1695 
1696  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1697  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1698  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1699  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1700  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1701  int buf_offset0 = EPEL_EXTRA_BEFORE *
1702  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1703  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1704  edge_emu_stride, srcstride,
1705  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1706  x_off - EPEL_EXTRA_BEFORE,
1707  y_off - EPEL_EXTRA_BEFORE,
1708  pic_width, pic_height);
1709 
1710  src0 = lc->edge_emu_buffer + buf_offset0;
1711  srcstride = edge_emu_stride;
1712  }
1713  if (!weight_flag)
1714  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1715  block_h, _mx, _my, block_w);
1716  else
1717  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1718  block_h, s->sh.chroma_log2_weight_denom,
1719  chroma_weight, chroma_offset, _mx, _my, block_w);
1720 }
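/*
 * Chroma motion vectors carry 2 + hshift/vshift fractional bits: with 4:2:0
 * (hshift = vshift = 1) the fraction is mv & 7, i.e. 1/8-pel, while with
 * 4:4:4 (shift 0) it is mv & 3, i.e. 1/4-pel.  The "<< (1 - shift)" above
 * rescales both cases to the 1/8-pel phase index that the EPEL filter tables
 * expect.
 */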
1721 
1722 /**
1723  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1724  *
 1725  * @param lc HEVC local (per-thread) decoding context
 1726  * @param dst0 target buffer for block data at block position
 1727  * @param dststride stride of the dst0 buffer
 1728  * @param ref0 reference picture0 buffer at origin (0, 0)
 1729  * @param ref1 reference picture1 buffer at origin (0, 0)
 1730  * @param x_off horizontal position of block from origin (0, 0)
 1731  * @param y_off vertical position of block from origin (0, 0)
 1732  * @param block_w width of block
 1733  * @param block_h height of block
 1734  * @param current_mv current motion vector structure; carries the L0 and L1
 1735  *                   motion vectors (relative to the block position) and
 1736  *                   reference indices used for this block
 1737  * @param cidx chroma component index (0 for Cb, 1 for Cr)
1738  */
1739 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1740  const AVFrame *ref0, const AVFrame *ref1,
1741  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1742 {
1743  const HEVCContext *const s = lc->parent;
1744  const uint8_t *src1 = ref0->data[cidx+1];
1745  const uint8_t *src2 = ref1->data[cidx+1];
1746  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1747  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1748  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1749  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1750  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1751  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1752  const Mv *const mv0 = &current_mv->mv[0];
1753  const Mv *const mv1 = &current_mv->mv[1];
1754  int hshift = s->ps.sps->hshift[1];
1755  int vshift = s->ps.sps->vshift[1];
1756 
1757  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1758  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1759  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1760  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1761  intptr_t _mx0 = mx0 << (1 - hshift);
1762  intptr_t _my0 = my0 << (1 - vshift);
1763  intptr_t _mx1 = mx1 << (1 - hshift);
1764  intptr_t _my1 = my1 << (1 - vshift);
1765 
1766  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1767  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1768  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1769  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1770  int idx = hevc_pel_weight[block_w];
1771  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1772  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1773 
1774  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1775  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1776  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1777  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1778  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1779  int buf_offset1 = EPEL_EXTRA_BEFORE *
1780  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1781 
1782  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1783  edge_emu_stride, src1stride,
1784  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1785  x_off0 - EPEL_EXTRA_BEFORE,
1786  y_off0 - EPEL_EXTRA_BEFORE,
1787  pic_width, pic_height);
1788 
1789  src1 = lc->edge_emu_buffer + buf_offset1;
1790  src1stride = edge_emu_stride;
1791  }
1792 
1793  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1794  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1795  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1796  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1797  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1798  int buf_offset1 = EPEL_EXTRA_BEFORE *
1799  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1800 
1801  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1802  edge_emu_stride, src2stride,
1803  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1804  x_off1 - EPEL_EXTRA_BEFORE,
1805  y_off1 - EPEL_EXTRA_BEFORE,
1806  pic_width, pic_height);
1807 
1808  src2 = lc->edge_emu_buffer2 + buf_offset1;
1809  src2stride = edge_emu_stride;
1810  }
1811 
1812  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1813  block_h, _mx0, _my0, block_w);
1814  if (!weight_flag)
1815  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1816  src2, src2stride, lc->tmp,
1817  block_h, _mx1, _my1, block_w);
1818  else
1819  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1820  src2, src2stride, lc->tmp,
1821  block_h,
1822  s->sh.chroma_log2_weight_denom,
1823  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1824  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1825  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1826  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1827  _mx1, _my1, block_w);
1828 }
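/*
 * As in luma_mc_bi(), the L0 chroma hypothesis is kept in lc->tmp at
 * intermediate precision and put_hevc_epel_bi[_w]() folds in the L1
 * hypothesis, using per-component (cidx) weights and offsets when explicit
 * weighted prediction is in use.
 */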
1829 
1830 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1831  const Mv *mv, int y0, int height)
1832 {
1833  if (s->threads_type == FF_THREAD_FRAME ) {
1834  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1835 
1836  ff_thread_await_progress(&ref->tf, y, 0);
1837  }
1838 }
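/*
 * With frame threading, hevc_await_progress() blocks until the reference
 * picture has been decoded at least down to the last row this prediction can
 * read (block bottom displaced by the motion vector, plus the interpolation
 * filter margin), so motion compensation never races the reference frame's
 * decoding thread.
 */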
1839 
1840 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1841  int nPbH, int log2_cb_size, int part_idx,
1842  int merge_idx, MvField *mv)
1843 {
1844  const HEVCContext *const s = lc->parent;
1845  enum InterPredIdc inter_pred_idc = PRED_L0;
1846  int mvp_flag;
1847 
1848  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1849  mv->pred_flag = 0;
1850  if (s->sh.slice_type == HEVC_SLICE_B)
1851  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1852 
1853  if (inter_pred_idc != PRED_L1) {
1854  if (s->sh.nb_refs[L0])
1855  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1856 
1857  mv->pred_flag = PF_L0;
1858  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1859  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1860  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1861  part_idx, merge_idx, mv, mvp_flag, 0);
1862  mv->mv[0].x += lc->pu.mvd.x;
1863  mv->mv[0].y += lc->pu.mvd.y;
1864  }
1865 
1866  if (inter_pred_idc != PRED_L0) {
1867  if (s->sh.nb_refs[L1])
1868  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1869 
1870  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1871  AV_ZERO32(&lc->pu.mvd);
1872  } else {
1873  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1874  }
1875 
1876  mv->pred_flag += PF_L1;
1877  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1878  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1879  part_idx, merge_idx, mv, mvp_flag, 1);
1880  mv->mv[1].x += lc->pu.mvd.x;
1881  mv->mv[1].y += lc->pu.mvd.y;
1882  }
1883 }
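/*
 * AMVP (non-merge) motion decoding: for each reference list in use the
 * reference index, the motion vector difference (ff_hevc_hls_mvd_coding) and
 * mvp_lx_flag are parsed, the predictor is derived by
 * ff_hevc_luma_mv_mvp_mode(), and the MVD is added on top.  When
 * mvd_l1_zero_flag is set and the PU is bi-predicted, the L1 MVD is forced
 * to zero instead of being parsed.
 */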
1884 
1885 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1886  int nPbW, int nPbH,
1887  int log2_cb_size, int partIdx, int idx)
1888 {
1889 #define POS(c_idx, x, y) \
1890  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1891  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1892  const HEVCContext *const s = lc->parent;
1893  int merge_idx = 0;
1894  struct MvField current_mv = {{{ 0 }}};
1895 
1896  int min_pu_width = s->ps.sps->min_pu_width;
1897 
1898  MvField *tab_mvf = s->ref->tab_mvf;
1899  const RefPicList *refPicList = s->ref->refPicList;
1900  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1901  uint8_t *dst0 = POS(0, x0, y0);
1902  uint8_t *dst1 = POS(1, x0, y0);
1903  uint8_t *dst2 = POS(2, x0, y0);
1904  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1905  int min_cb_width = s->ps.sps->min_cb_width;
1906  int x_cb = x0 >> log2_min_cb_size;
1907  int y_cb = y0 >> log2_min_cb_size;
1908  int x_pu, y_pu;
1909  int i, j;
1910 
1911  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1912 
 1913  if (!skip_flag)
 1914  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1915 
1916  if (skip_flag || lc->pu.merge_flag) {
1917  if (s->sh.max_num_merge_cand > 1)
1918  merge_idx = ff_hevc_merge_idx_decode(lc);
1919  else
1920  merge_idx = 0;
1921 
1922  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1923  partIdx, merge_idx, &current_mv);
1924  } else {
1925  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1926  partIdx, merge_idx, &current_mv);
1927  }
1928 
1929  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1930  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1931 
1932  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1933  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1934  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1935 
1936  if (current_mv.pred_flag & PF_L0) {
1937  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1938  if (!ref0)
1939  return;
1940  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1941  }
1942  if (current_mv.pred_flag & PF_L1) {
1943  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1944  if (!ref1)
1945  return;
1946  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1947  }
1948 
1949  if (current_mv.pred_flag == PF_L0) {
1950  int x0_c = x0 >> s->ps.sps->hshift[1];
1951  int y0_c = y0 >> s->ps.sps->vshift[1];
1952  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1953  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1954 
1955  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1956  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1957  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1958  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1959 
1960  if (s->ps.sps->chroma_format_idc) {
1961  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1962  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1963  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1964  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1965  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1966  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1967  }
1968  } else if (current_mv.pred_flag == PF_L1) {
1969  int x0_c = x0 >> s->ps.sps->hshift[1];
1970  int y0_c = y0 >> s->ps.sps->vshift[1];
1971  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1972  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1973 
1974  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
1975  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1976  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1977  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1978 
1979  if (s->ps.sps->chroma_format_idc) {
1980  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1981  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1982  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1983 
1984  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1985  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1986  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1987  }
1988  } else if (current_mv.pred_flag == PF_BI) {
1989  int x0_c = x0 >> s->ps.sps->hshift[1];
1990  int y0_c = y0 >> s->ps.sps->vshift[1];
1991  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1992  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1993 
1994  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
1995  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1996  ref1->frame, &current_mv.mv[1], &current_mv);
1997 
1998  if (s->ps.sps->chroma_format_idc) {
1999  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
2000  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
2001 
2002  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
2003  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
2004  }
2005  }
2006 }
2007 
2008 /**
2009  * 8.4.1
2010  */
2011 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
2012  int prev_intra_luma_pred_flag)
2013 {
2014  const HEVCContext *const s = lc->parent;
2015  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2016  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2017  int min_pu_width = s->ps.sps->min_pu_width;
2018  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
2019  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
2020  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
2021 
2022  int cand_up = (lc->ctb_up_flag || y0b) ?
2023  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2024  int cand_left = (lc->ctb_left_flag || x0b) ?
2025  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2026 
2027  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2028 
2029  MvField *tab_mvf = s->ref->tab_mvf;
2030  int intra_pred_mode;
2031  int candidate[3];
2032  int i, j;
2033 
2034  // intra_pred_mode prediction does not cross vertical CTB boundaries
2035  if ((y0 - 1) < y_ctb)
2036  cand_up = INTRA_DC;
2037 
2038  if (cand_left == cand_up) {
2039  if (cand_left < 2) {
2040  candidate[0] = INTRA_PLANAR;
2041  candidate[1] = INTRA_DC;
2042  candidate[2] = INTRA_ANGULAR_26;
2043  } else {
2044  candidate[0] = cand_left;
2045  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2046  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2047  }
2048  } else {
2049  candidate[0] = cand_left;
2050  candidate[1] = cand_up;
2051  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2052  candidate[2] = INTRA_PLANAR;
2053  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2054  candidate[2] = INTRA_DC;
2055  } else {
2056  candidate[2] = INTRA_ANGULAR_26;
2057  }
2058  }
2059 
2060  if (prev_intra_luma_pred_flag) {
2061  intra_pred_mode = candidate[lc->pu.mpm_idx];
2062  } else {
2063  if (candidate[0] > candidate[1])
2064  FFSWAP(uint8_t, candidate[0], candidate[1]);
2065  if (candidate[0] > candidate[2])
2066  FFSWAP(uint8_t, candidate[0], candidate[2]);
2067  if (candidate[1] > candidate[2])
2068  FFSWAP(uint8_t, candidate[1], candidate[2]);
2069 
2070  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2071  for (i = 0; i < 3; i++)
2072  if (intra_pred_mode >= candidate[i])
2073  intra_pred_mode++;
2074  }
2075 
2076  /* write the intra prediction units into the mv array */
2077  if (!size_in_pus)
2078  size_in_pus = 1;
2079  for (i = 0; i < size_in_pus; i++) {
2080  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2081  intra_pred_mode, size_in_pus);
2082 
2083  for (j = 0; j < size_in_pus; j++) {
2084  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2085  }
2086  }
2087 
2088  return intra_pred_mode;
2089 }
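/*
 * The mode decoding above follows the three-entry Most Probable Mode list of
 * the spec: if prev_intra_luma_pred_flag is set, mpm_idx picks one of the
 * candidates directly; otherwise rem_intra_luma_pred_mode indexes the 32
 * remaining modes, which is why the candidates are sorted and the decoded
 * value is bumped past every candidate it is not smaller than.
 */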
2090 
2091 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2092  int log2_cb_size, int ct_depth)
2093 {
2094  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2095  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2096  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2097  int y;
2098 
2099  for (y = 0; y < length; y++)
2100  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2101  ct_depth, length);
2102 }
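/*
 * set_ct_depth() records the coding-tree depth of the finished CU; the depth
 * of the left and top neighbours is later used to select the CABAC context
 * for split_cu_flag of subsequent CUs.
 */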
2103 
2104 static const uint8_t tab_mode_idx[] = {
2105  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2106  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2107 
2108 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2109  int log2_cb_size)
2110 {
2111  const HEVCContext *const s = lc->parent;
2112  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2113  uint8_t prev_intra_luma_pred_flag[4];
2114  int split = lc->cu.part_mode == PART_NxN;
2115  int pb_size = (1 << log2_cb_size) >> split;
2116  int side = split + 1;
2117  int chroma_mode;
2118  int i, j;
2119 
2120  for (i = 0; i < side; i++)
2121  for (j = 0; j < side; j++)
2122  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2123 
2124  for (i = 0; i < side; i++) {
2125  for (j = 0; j < side; j++) {
2126  if (prev_intra_luma_pred_flag[2 * i + j])
2127  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
 2128  else
 2129  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2130 
2131  lc->pu.intra_pred_mode[2 * i + j] =
2132  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2133  prev_intra_luma_pred_flag[2 * i + j]);
2134  }
2135  }
2136 
2137  if (s->ps.sps->chroma_format_idc == 3) {
2138  for (i = 0; i < side; i++) {
2139  for (j = 0; j < side; j++) {
2140  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2141  if (chroma_mode != 4) {
2142  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2143  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2144  else
2145  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2146  } else {
2147  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2148  }
2149  }
2150  }
2151  } else if (s->ps.sps->chroma_format_idc == 2) {
2152  int mode_idx;
2153  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2154  if (chroma_mode != 4) {
2155  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2156  mode_idx = 34;
2157  else
2158  mode_idx = intra_chroma_table[chroma_mode];
2159  } else {
2160  mode_idx = lc->pu.intra_pred_mode[0];
2161  }
2162  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2163  } else if (s->ps.sps->chroma_format_idc != 0) {
2164  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2165  if (chroma_mode != 4) {
2166  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2167  lc->pu.intra_pred_mode_c[0] = 34;
2168  else
2169  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2170  } else {
2171  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2172  }
2173  }
2174 }
2175 
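/*
 * Fills the intra prediction mode table with INTRA_DC for blocks that carry
 * no explicitly coded luma intra mode (inter/skip CUs and PCM blocks), and
 * for intra CUs marks the covered motion-field entries as PF_INTRA.
 */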
 2176 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
 2177  int x0, int y0,
2178  int log2_cb_size)
2179 {
2180  const HEVCContext *const s = lc->parent;
2181  int pb_size = 1 << log2_cb_size;
2182  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2183  int min_pu_width = s->ps.sps->min_pu_width;
2184  MvField *tab_mvf = s->ref->tab_mvf;
2185  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2186  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2187  int j, k;
2188 
2189  if (size_in_pus == 0)
2190  size_in_pus = 1;
2191  for (j = 0; j < size_in_pus; j++)
2192  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2193  if (lc->cu.pred_mode == MODE_INTRA)
2194  for (j = 0; j < size_in_pus; j++)
2195  for (k = 0; k < size_in_pus; k++)
2196  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2197 }
2198 
2199 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2200 {
2201  int cb_size = 1 << log2_cb_size;
2202  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2203  int length = cb_size >> log2_min_cb_size;
2204  int min_cb_width = s->ps.sps->min_cb_width;
2205  int x_cb = x0 >> log2_min_cb_size;
2206  int y_cb = y0 >> log2_min_cb_size;
2207  int idx = log2_cb_size - 2;
2208  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2209  int x, y, ret;
2210 
2211  lc->cu.x = x0;
2212  lc->cu.y = y0;
2213  lc->cu.pred_mode = MODE_INTRA;
2214  lc->cu.part_mode = PART_2Nx2N;
2215  lc->cu.intra_split_flag = 0;
2216 
2217  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2218  for (x = 0; x < 4; x++)
2219  lc->pu.intra_pred_mode[x] = 1;
 2220  if (s->ps.pps->transquant_bypass_enable_flag) {
 2221  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2222  if (lc->cu.cu_transquant_bypass_flag)
2223  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2224  } else
2225  lc->cu.cu_transquant_bypass_flag = 0;
2226 
2227  if (s->sh.slice_type != HEVC_SLICE_I) {
2228  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2229 
2230  x = y_cb * min_cb_width + x_cb;
2231  for (y = 0; y < length; y++) {
2232  memset(&s->skip_flag[x], skip_flag, length);
2233  x += min_cb_width;
2234  }
2235  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2236  } else {
2237  x = y_cb * min_cb_width + x_cb;
2238  for (y = 0; y < length; y++) {
2239  memset(&s->skip_flag[x], 0, length);
2240  x += min_cb_width;
2241  }
2242  }
2243 
2244  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2245  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2246  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2247 
2248  if (!s->sh.disable_deblocking_filter_flag)
2249  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2250  } else {
2251  int pcm_flag = 0;
2252 
 2253  if (s->sh.slice_type != HEVC_SLICE_I)
 2254  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2255  if (lc->cu.pred_mode != MODE_INTRA ||
2256  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2257  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2258  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2259  lc->cu.pred_mode == MODE_INTRA;
2260  }
2261 
2262  if (lc->cu.pred_mode == MODE_INTRA) {
2263  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2264  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2265  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2266  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2267  }
2268  if (pcm_flag) {
2269  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2270  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2271  if (s->ps.sps->pcm.loop_filter_disable_flag)
2272  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2273 
2274  if (ret < 0)
2275  return ret;
2276  } else {
2277  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2278  }
2279  } else {
2280  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2281  switch (lc->cu.part_mode) {
2282  case PART_2Nx2N:
2283  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2284  break;
2285  case PART_2NxN:
2286  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2287  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2288  break;
2289  case PART_Nx2N:
2290  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2291  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2292  break;
2293  case PART_2NxnU:
2294  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2295  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2296  break;
2297  case PART_2NxnD:
2298  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2299  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2300  break;
2301  case PART_nLx2N:
2302  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2303  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2304  break;
2305  case PART_nRx2N:
2306  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2307  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2308  break;
2309  case PART_NxN:
2310  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2311  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2312  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2313  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2314  break;
2315  }
2316  }
2317 
2318  if (!pcm_flag) {
2319  int rqt_root_cbf = 1;
2320 
2321  if (lc->cu.pred_mode != MODE_INTRA &&
2322  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2323  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2324  }
2325  if (rqt_root_cbf) {
2326  const static int cbf[2] = { 0 };
2327  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2328  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2329  s->ps.sps->max_transform_hierarchy_depth_inter;
2330  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2331  log2_cb_size,
2332  log2_cb_size, 0, 0, cbf, cbf);
2333  if (ret < 0)
2334  return ret;
2335  } else {
2336  if (!s->sh.disable_deblocking_filter_flag)
2337  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2338  }
2339  }
2340  }
2341 
2342  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2343  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2344 
2345  x = y_cb * min_cb_width + x_cb;
2346  for (y = 0; y < length; y++) {
2347  memset(&s->qp_y_tab[x], lc->qp_y, length);
2348  x += min_cb_width;
2349  }
2350 
2351  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2352  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2353  lc->qPy_pred = lc->qp_y;
2354  }
2355 
2356  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2357 
2358  return 0;
2359 }
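/*
 * hls_coding_unit() decodes one CU: skip flag, prediction mode, PCM or the
 * intra/inter partitioning and its prediction units, and finally the residual
 * transform tree (hls_transform_tree) unless rqt_root_cbf indicates that no
 * residual is coded.  The per-CU QP map and the coding-tree depth map are
 * updated for the whole CU area before returning.
 */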
2360 
2361 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2362  int log2_cb_size, int cb_depth)
2363 {
2364  const HEVCContext *const s = lc->parent;
2365  const int cb_size = 1 << log2_cb_size;
2366  int ret;
2367  int split_cu;
2368 
2369  lc->ct_depth = cb_depth;
2370  if (x0 + cb_size <= s->ps.sps->width &&
2371  y0 + cb_size <= s->ps.sps->height &&
2372  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2373  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2374  } else {
2375  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2376  }
2377  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2378  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2379  lc->tu.is_cu_qp_delta_coded = 0;
2380  lc->tu.cu_qp_delta = 0;
2381  }
2382 
2383  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
 2384  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
 2385  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2386  }
2387 
2388  if (split_cu) {
2389  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2390  const int cb_size_split = cb_size >> 1;
2391  const int x1 = x0 + cb_size_split;
2392  const int y1 = y0 + cb_size_split;
2393 
2394  int more_data = 0;
2395 
2396  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2397  if (more_data < 0)
2398  return more_data;
2399 
2400  if (more_data && x1 < s->ps.sps->width) {
2401  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2402  if (more_data < 0)
2403  return more_data;
2404  }
2405  if (more_data && y1 < s->ps.sps->height) {
2406  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2407  if (more_data < 0)
2408  return more_data;
2409  }
2410  if (more_data && x1 < s->ps.sps->width &&
2411  y1 < s->ps.sps->height) {
2412  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2413  if (more_data < 0)
2414  return more_data;
2415  }
2416 
2417  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2418  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2419  lc->qPy_pred = lc->qp_y;
2420 
2421  if (more_data)
2422  return ((x1 + cb_size_split) < s->ps.sps->width ||
2423  (y1 + cb_size_split) < s->ps.sps->height);
2424  else
2425  return 0;
2426  } else {
2427  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2428  if (ret < 0)
2429  return ret;
2430  if ((!((x0 + cb_size) %
2431  (1 << (s->ps.sps->log2_ctb_size))) ||
2432  (x0 + cb_size >= s->ps.sps->width)) &&
2433  (!((y0 + cb_size) %
2434  (1 << (s->ps.sps->log2_ctb_size))) ||
2435  (y0 + cb_size >= s->ps.sps->height))) {
2436  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2437  return !end_of_slice_flag;
2438  } else {
2439  return 1;
2440  }
2441  }
2442 
2443  return 0;
2444 }
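/*
 * hls_coding_quadtree() recursively splits a CTB down to the signalled CU
 * sizes.  Its return value means "more data follows in this slice segment":
 * a negative value is an error, 0 stops CTB parsing (end_of_slice_flag or the
 * picture edge was reached), and a positive value tells the caller to go on
 * with the next coding unit / CTB.
 */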
2445 
2446 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2447  int ctb_addr_ts)
2448 {
2449  const HEVCContext *const s = lc->parent;
2450  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2451  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2452  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2453 
2454  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2455 
2456  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2457  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2458  lc->first_qp_group = 1;
2459  lc->end_of_tiles_x = s->ps.sps->width;
2460  } else if (s->ps.pps->tiles_enabled_flag) {
2461  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2462  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2463  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2464  lc->first_qp_group = 1;
2465  }
2466  } else {
2467  lc->end_of_tiles_x = s->ps.sps->width;
2468  }
2469 
2470  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2471 
2472  lc->boundary_flags = 0;
2473  if (s->ps.pps->tiles_enabled_flag) {
 2474  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
 2475  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
 2476  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
 2477  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
 2478  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
 2479  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
 2480  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
 2481  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
 2482  } else {
 2483  if (ctb_addr_in_slice <= 0)
 2484  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
 2485  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
 2486  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2487  }
2488 
2489  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2490  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2491  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2492  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2493 }
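/*
 * hls_decode_neighbour() records, per CTB, which neighbouring CTBs belong to
 * the same slice and tile.  The resulting ctb_left/up/up-left/up-right flags
 * are consulted whenever the decoder needs to know whether a neighbouring
 * block is available (intra mode prediction, motion candidate derivation),
 * and the slice/tile boundary flags control filtering across those edges.
 */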
2494 
2495 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2496 {
2497  HEVCContext *s = avctxt->priv_data;
2498  HEVCLocalContext *const lc = s->HEVClc;
2499  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2500  int more_data = 1;
2501  int x_ctb = 0;
2502  int y_ctb = 0;
2503  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2504  int ret;
2505 
2506  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2507  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2508  return AVERROR_INVALIDDATA;
2509  }
2510 
2511  if (s->sh.dependent_slice_segment_flag) {
2512  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2513  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2514  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2515  return AVERROR_INVALIDDATA;
2516  }
2517  }
2518 
2519  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2520  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2521 
2522  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2523  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2524  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2525 
2526  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2527  if (ret < 0) {
2528  s->tab_slice_address[ctb_addr_rs] = -1;
2529  return ret;
2530  }
2531 
2532  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2533 
2534  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2535  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2536  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2537 
2538  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2539  if (more_data < 0) {
2540  s->tab_slice_address[ctb_addr_rs] = -1;
2541  return more_data;
2542  }
2543 
2544 
2545  ctb_addr_ts++;
2546  ff_hevc_save_states(lc, ctb_addr_ts);
2547  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2548  }
2549 
2550  if (x_ctb + ctb_size >= s->ps.sps->width &&
2551  y_ctb + ctb_size >= s->ps.sps->height)
2552  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2553 
2554  return ctb_addr_ts;
2555 }
2556 
 2557 static int hls_slice_data(HEVCContext *s)
 2558 {
2559  int arg[2];
2560  int ret[2];
2561 
2562  arg[0] = 0;
2563  arg[1] = 1;
2564 
2565  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2566  return ret[0];
2567 }
2568 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2569  int job, int self_id)
2570 {
2571  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2572  const HEVCContext *const s = lc->parent;
2573  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2574  int more_data = 1;
2575  int ctb_row = job;
2576  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2577  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2578  int thread = ctb_row % s->threads_number;
2579  int ret;
2580 
2581  if(ctb_row) {
2582  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2583  if (ret < 0)
2584  goto error;
2585  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2586  }
2587 
2588  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2589  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2590  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2591 
2592  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2593 
2594  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2595 
2596  /* atomic_load's prototype requires a pointer to non-const atomic variable
2597  * (due to implementations via mutexes, where reads involve writes).
2598  * Of course, casting const away here is nevertheless safe. */
2599  if (atomic_load((atomic_int*)&s->wpp_err)) {
2600  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2601  return 0;
2602  }
2603 
2604  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2605  if (ret < 0)
2606  goto error;
2607  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2608  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2609 
2610  if (more_data < 0) {
2611  ret = more_data;
2612  goto error;
2613  }
2614 
2615  ctb_addr_ts++;
2616 
2617  ff_hevc_save_states(lc, ctb_addr_ts);
2618  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2619  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2620 
2621  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2622  /* Casting const away here is safe, because it is an atomic operation. */
2623  atomic_store((atomic_int*)&s->wpp_err, 1);
2624  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2625  return 0;
2626  }
2627 
2628  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2629  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2630  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2631  return ctb_addr_ts;
2632  }
2633  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2634  x_ctb+=ctb_size;
2635 
2636  if(x_ctb >= s->ps.sps->width) {
2637  break;
2638  }
2639  }
2640  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2641 
2642  return 0;
2643 error:
2644  s->tab_slice_address[ctb_addr_rs] = -1;
2645  /* Casting const away here is safe, because it is an atomic operation. */
2646  atomic_store((atomic_int*)&s->wpp_err, 1);
2647  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2648  return ret;
2649 }
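/*
 * Wavefront (WPP) entry above: each CTB row is one job.  Before decoding a
 * CTB, a row waits (ff_thread_await_progress2) until the row above is at
 * least SHIFT_CTB_WPP CTBs ahead; the CABAC state that the upper row saved
 * with ff_hevc_save_states() is loaded by ff_hevc_cabac_init() at the row
 * start.  Rows report their own progress after every CTB, and s->wpp_err is
 * set atomically so that all rows bail out on a bitstream error.
 */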
2650 
2651 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2652 {
2653  const uint8_t *data = nal->data;
2654  int length = nal->size;
2655  HEVCLocalContext *lc = s->HEVClc;
2656  int *ret;
2657  int64_t offset;
2658  int64_t startheader, cmpt = 0;
2659  int i, j, res = 0;
2660 
2661  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2662  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2663  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2664  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2665  );
2666  return AVERROR_INVALIDDATA;
2667  }
2668 
2669  for (i = 1; i < s->threads_number; i++) {
2670  if (s->HEVClcList[i])
2671  continue;
2672  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2673  if (!s->HEVClcList[i])
2674  return AVERROR(ENOMEM);
2675  s->HEVClcList[i]->logctx = s->avctx;
2676  s->HEVClcList[i]->parent = s;
2677  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2678  }
2679 
2680  offset = (lc->gb.index >> 3);
2681 
2682  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2683  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2684  startheader--;
2685  cmpt++;
2686  }
2687  }
2688 
2689  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2690  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2691  for (j = 0, cmpt = 0, startheader = offset
2692  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2693  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2694  startheader--;
2695  cmpt++;
2696  }
2697  }
2698  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2699  s->sh.offset[i - 1] = offset;
2700 
2701  }
2702  if (s->sh.num_entry_point_offsets != 0) {
2703  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2704  if (length < offset) {
2705  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2706  return AVERROR_INVALIDDATA;
2707  }
2708  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2709  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2710 
2711  }
2712  s->data = data;
2713 
2714  for (i = 1; i < s->threads_number; i++) {
2715  s->HEVClcList[i]->first_qp_group = 1;
2716  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2717  }
2718 
2719  atomic_store(&s->wpp_err, 0);
2720  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2721  if (res < 0)
2722  return res;
2723 
2724  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2725  if (!ret)
2726  return AVERROR(ENOMEM);
2727 
2728  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2729  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2730 
2731  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2732  res += ret[i];
2733 
2734  av_free(ret);
2735  return res;
2736 }
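/*
 * hls_slice_data_wpp() turns the slice header's entry_point_offset[] table
 * into per-row (offset, size) pairs, compensating for the emulation
 * prevention bytes stripped by the NAL parser (nal->skipped_bytes_pos), and
 * then runs one hls_decode_entry_wpp job per CTB row via avctx->execute2().
 */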
2737 
 2738 static int set_side_data(HEVCContext *s)
 2739 {
2740  AVFrame *out = s->ref->frame;
2741  int ret;
2742 
2743  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2744  // so the side data persists for the entire coded video sequence.
2745  if (s->sei.mastering_display.present > 0 &&
2746  IS_IRAP(s) && s->no_rasl_output_flag) {
2747  s->sei.mastering_display.present--;
2748  }
2749  if (s->sei.mastering_display.present) {
2750  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2751  const int mapping[3] = {2, 0, 1};
2752  const int chroma_den = 50000;
2753  const int luma_den = 10000;
2754  int i;
 2755  AVMasteringDisplayMetadata *metadata =
 2756  av_mastering_display_metadata_create_side_data(out);
2757  if (!metadata)
2758  return AVERROR(ENOMEM);
2759 
2760  for (i = 0; i < 3; i++) {
2761  const int j = mapping[i];
2762  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2763  metadata->display_primaries[i][0].den = chroma_den;
2764  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2765  metadata->display_primaries[i][1].den = chroma_den;
2766  }
2767  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2768  metadata->white_point[0].den = chroma_den;
2769  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2770  metadata->white_point[1].den = chroma_den;
2771 
2772  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2773  metadata->max_luminance.den = luma_den;
2774  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2775  metadata->min_luminance.den = luma_den;
2776  metadata->has_luminance = 1;
2777  metadata->has_primaries = 1;
2778 
2779  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2780  av_log(s->avctx, AV_LOG_DEBUG,
2781  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2782  av_q2d(metadata->display_primaries[0][0]),
2783  av_q2d(metadata->display_primaries[0][1]),
2784  av_q2d(metadata->display_primaries[1][0]),
2785  av_q2d(metadata->display_primaries[1][1]),
2786  av_q2d(metadata->display_primaries[2][0]),
2787  av_q2d(metadata->display_primaries[2][1]),
2788  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2789  av_log(s->avctx, AV_LOG_DEBUG,
2790  "min_luminance=%f, max_luminance=%f\n",
2791  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2792  }
2793  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2794  // so the side data persists for the entire coded video sequence.
2795  if (s->sei.content_light.present > 0 &&
2796  IS_IRAP(s) && s->no_rasl_output_flag) {
2797  s->sei.content_light.present--;
2798  }
2799  if (s->sei.content_light.present) {
 2800  AVContentLightMetadata *metadata =
 2801  av_content_light_metadata_create_side_data(out);
2802  if (!metadata)
2803  return AVERROR(ENOMEM);
2804  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2805  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2806 
2807  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2808  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2809  metadata->MaxCLL, metadata->MaxFALL);
2810  }
2811 
2812  ret = ff_h2645_sei_to_frame(out, &s->sei.common, AV_CODEC_ID_HEVC, NULL,
2813  &s->ps.sps->vui.common,
2814  s->ps.sps->bit_depth, s->ps.sps->bit_depth_chroma,
2815  s->ref->poc /* no poc_offset in HEVC */);
2816  if (ret < 0)
2817  return ret;
2818 
2819  if (s->sei.timecode.present) {
2820  uint32_t *tc_sd;
 2821  char tcbuf[AV_TIMECODE_STR_SIZE];
 2822  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2823  sizeof(uint32_t) * 4);
2824  if (!tcside)
2825  return AVERROR(ENOMEM);
2826 
2827  tc_sd = (uint32_t*)tcside->data;
2828  tc_sd[0] = s->sei.timecode.num_clock_ts;
2829 
2830  for (int i = 0; i < tc_sd[0]; i++) {
2831  int drop = s->sei.timecode.cnt_dropped_flag[i];
2832  int hh = s->sei.timecode.hours_value[i];
2833  int mm = s->sei.timecode.minutes_value[i];
2834  int ss = s->sei.timecode.seconds_value[i];
2835  int ff = s->sei.timecode.n_frames[i];
2836 
2837  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2838  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2839  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2840  }
2841 
2842  s->sei.timecode.num_clock_ts = 0;
2843  }
2844 
2845  if (s->sei.common.dynamic_hdr_plus.info) {
2846  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_plus.info);
2847  if (!info_ref)
2848  return AVERROR(ENOMEM);
2849 
 2850  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
 2851  av_buffer_unref(&info_ref);
2852  return AVERROR(ENOMEM);
2853  }
2854  }
2855 
 2856  if (s->rpu_buf) {
 2857  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2858  if (!rpu)
2859  return AVERROR(ENOMEM);
2860 
2861  s->rpu_buf = NULL;
2862  }
2863 
2864  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2865  return ret;
2866 
2867  if (s->sei.common.dynamic_hdr_vivid.info) {
2868  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_vivid.info);
2869  if (!info_ref)
2870  return AVERROR(ENOMEM);
2871 
 2872  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
 2873  av_buffer_unref(&info_ref);
2874  return AVERROR(ENOMEM);
2875  }
2876  }
2877 
2878  return 0;
2879 }
2880 
 2881 static int hevc_frame_start(HEVCContext *s)
 2882 {
2883  HEVCLocalContext *lc = s->HEVClc;
2884  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2885  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2886  int ret;
2887 
2888  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2889  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2890  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2891  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2892  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2893 
2894  s->is_decoded = 0;
2895  s->first_nal_type = s->nal_unit_type;
2896 
2897  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2898 
2899  if (s->ps.pps->tiles_enabled_flag)
2900  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2901 
2902  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2903  if (ret < 0)
2904  goto fail;
2905 
2906  ret = ff_hevc_frame_rps(s);
2907  if (ret < 0) {
2908  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2909  goto fail;
2910  }
2911 
2912  s->ref->frame->key_frame = IS_IRAP(s);
2913 
2914  s->ref->needs_fg = s->sei.common.film_grain_characteristics.present &&
2915  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
2916  !s->avctx->hwaccel;
2917 
2918  if (s->ref->needs_fg) {
2919  s->ref->frame_grain->format = s->ref->frame->format;
2920  s->ref->frame_grain->width = s->ref->frame->width;
2921  s->ref->frame_grain->height = s->ref->frame->height;
2922  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
2923  goto fail;
2924  }
2925 
2926  ret = set_side_data(s);
2927  if (ret < 0)
2928  goto fail;
2929 
2930  s->frame->pict_type = 3 - s->sh.slice_type;
2931 
 2932  if (!IS_IRAP(s))
 2933  ff_hevc_bump_frame(s);
2934 
2935  av_frame_unref(s->output_frame);
2936  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2937  if (ret < 0)
2938  goto fail;
2939 
2940  if (!s->avctx->hwaccel)
2941  ff_thread_finish_setup(s->avctx);
2942 
2943  return 0;
2944 
2945 fail:
2946  if (s->ref)
2947  ff_hevc_unref_frame(s, s->ref, ~0);
2948  s->ref = NULL;
2949  return ret;
2950 }
2951 
 2952 static int hevc_frame_end(HEVCContext *s)
 2953 {
2954  HEVCFrame *out = s->ref;
2955  const AVFrameSideData *sd;
2956  int ret;
2957 
 2958  if (out->needs_fg) {
 2959  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
2960  av_assert0(out->frame_grain->buf[0] && sd);
2961  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
2962  (AVFilmGrainParams *) sd->data);
2963 
2964  if (ret < 0) {
2965  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
2966  "grain, ignoring: %s\n", av_err2str(ret));
2967  out->needs_fg = 0;
2968  }
2969  }
2970 
2971  return 0;
2972 }
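/*
 * hevc_frame_end() synthesizes film grain (ITU-T H.274) into the separate
 * frame_grain buffer when the film grain SEI is present and the caller did
 * not request it to be exported as side data instead; on failure the frame
 * is output without grain.
 */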
2973 
2974 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2975 {
2976  HEVCLocalContext *lc = s->HEVClc;
2977  GetBitContext *gb = &lc->gb;
2978  int ctb_addr_ts, ret;
2979 
2980  *gb = nal->gb;
2981  s->nal_unit_type = nal->type;
2982  s->temporal_id = nal->temporal_id;
2983 
2984  switch (s->nal_unit_type) {
2985  case HEVC_NAL_VPS:
2986  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2987  ret = s->avctx->hwaccel->decode_params(s->avctx,
2988  nal->type,
2989  nal->raw_data,
2990  nal->raw_size);
2991  if (ret < 0)
2992  goto fail;
2993  }
2994  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2995  if (ret < 0)
2996  goto fail;
2997  break;
2998  case HEVC_NAL_SPS:
2999  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3000  ret = s->avctx->hwaccel->decode_params(s->avctx,
3001  nal->type,
3002  nal->raw_data,
3003  nal->raw_size);
3004  if (ret < 0)
3005  goto fail;
3006  }
3007  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3008  s->apply_defdispwin);
3009  if (ret < 0)
3010  goto fail;
3011  break;
3012  case HEVC_NAL_PPS:
3013  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3014  ret = s->avctx->hwaccel->decode_params(s->avctx,
3015  nal->type,
3016  nal->raw_data,
3017  nal->raw_size);
3018  if (ret < 0)
3019  goto fail;
3020  }
3021  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3022  if (ret < 0)
3023  goto fail;
3024  break;
3025  case HEVC_NAL_SEI_PREFIX:
3026  case HEVC_NAL_SEI_SUFFIX:
3027  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3028  ret = s->avctx->hwaccel->decode_params(s->avctx,
3029  nal->type,
3030  nal->raw_data,
3031  nal->raw_size);
3032  if (ret < 0)
3033  goto fail;
3034  }
3035  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3036  if (ret < 0)
3037  goto fail;
3038  break;
3039  case HEVC_NAL_TRAIL_R:
3040  case HEVC_NAL_TRAIL_N:
3041  case HEVC_NAL_TSA_N:
3042  case HEVC_NAL_TSA_R:
3043  case HEVC_NAL_STSA_N:
3044  case HEVC_NAL_STSA_R:
3045  case HEVC_NAL_BLA_W_LP:
3046  case HEVC_NAL_BLA_W_RADL:
3047  case HEVC_NAL_BLA_N_LP:
3048  case HEVC_NAL_IDR_W_RADL:
3049  case HEVC_NAL_IDR_N_LP:
3050  case HEVC_NAL_CRA_NUT:
3051  case HEVC_NAL_RADL_N:
3052  case HEVC_NAL_RADL_R:
3053  case HEVC_NAL_RASL_N:
3054  case HEVC_NAL_RASL_R:
3055  ret = hls_slice_header(s);
3056  if (ret < 0)
3057  return ret;
 3058  if (ret == 1) {
 3059  ret = AVERROR_INVALIDDATA;
3060  goto fail;
3061  }
3062 
3063 
3064  if (
3065  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3066  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3067  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3068  break;
3069  }
3070 
3071  if (s->sh.first_slice_in_pic_flag) {
3072  if (s->max_ra == INT_MAX) {
3073  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3074  s->max_ra = s->poc;
3075  } else {
3076  if (IS_IDR(s))
3077  s->max_ra = INT_MIN;
3078  }
3079  }
3080 
3081  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3082  s->poc <= s->max_ra) {
3083  s->is_decoded = 0;
3084  break;
3085  } else {
3086  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3087  s->max_ra = INT_MIN;
3088  }
3089 
3090  s->overlap ++;
3091  ret = hevc_frame_start(s);
3092  if (ret < 0)
3093  return ret;
3094  } else if (!s->ref) {
3095  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3096  goto fail;
3097  }
3098 
3099  if (s->nal_unit_type != s->first_nal_type) {
3100  av_log(s->avctx, AV_LOG_ERROR,
3101  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3102  s->first_nal_type, s->nal_unit_type);
3103  return AVERROR_INVALIDDATA;
3104  }
3105 
3106  if (!s->sh.dependent_slice_segment_flag &&
3107  s->sh.slice_type != HEVC_SLICE_I) {
3108  ret = ff_hevc_slice_rpl(s);
3109  if (ret < 0) {
3110  av_log(s->avctx, AV_LOG_WARNING,
3111  "Error constructing the reference lists for the current slice.\n");
3112  goto fail;
3113  }
3114  }
3115 
3116  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3117  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3118  if (ret < 0)
3119  goto fail;
3120  }
3121 
3122  if (s->avctx->hwaccel) {
3123  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3124  if (ret < 0)
3125  goto fail;
3126  } else {
3127  if (s->avctx->profile == FF_PROFILE_HEVC_SCC) {
3128  av_log(s->avctx, AV_LOG_ERROR,
 3129  "SCC profile is not yet implemented in hevc native decoder.\n");
 3130  ret = AVERROR_PATCHWELCOME;
3131  goto fail;
3132  }
3133 
3134  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3135  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3136  else
3137  ctb_addr_ts = hls_slice_data(s);
3138  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3139  ret = hevc_frame_end(s);
3140  if (ret < 0)
3141  goto fail;
3142  s->is_decoded = 1;
3143  }
3144 
3145  if (ctb_addr_ts < 0) {
3146  ret = ctb_addr_ts;
3147  goto fail;
3148  }
3149  }
3150  break;
3151  case HEVC_NAL_EOS_NUT:
3152  case HEVC_NAL_EOB_NUT:
3153  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3154  s->max_ra = INT_MAX;
3155  break;
3156  case HEVC_NAL_AUD:
3157  case HEVC_NAL_FD_NUT:
3158  case HEVC_NAL_UNSPEC62:
3159  break;
3160  default:
3161  av_log(s->avctx, AV_LOG_INFO,
3162  "Skipping NAL unit %d\n", s->nal_unit_type);
3163  }
3164 
3165  return 0;
3166 fail:
3167  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3168  return ret;
3169  return 0;
3170 }
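/*
 * decode_nal_unit() dispatches one NAL unit: parameter sets and SEI go to
 * their parsers (and to the hwaccel via decode_params(), if provided), while
 * VCL NAL units parse the slice header, start a new frame on the first slice
 * of a picture, and then decode the slice data either serially, with WPP
 * threads, or through the hwaccel.
 */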
3171 
3172 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3173 {
3174  int i, ret = 0;
3175  int eos_at_start = 1;
3176 
3177  s->ref = NULL;
3178  s->last_eos = s->eos;
3179  s->eos = 0;
3180  s->overlap = 0;
3181 
3182  /* split the input packet into NAL units, so we know the upper bound on the
3183  * number of slices in the frame */
3184  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3185  s->nal_length_size, s->avctx->codec_id, 1, 0);
3186  if (ret < 0) {
3187  av_log(s->avctx, AV_LOG_ERROR,
3188  "Error splitting the input into NAL units.\n");
3189  return ret;
3190  }
3191 
3192  for (i = 0; i < s->pkt.nb_nals; i++) {
3193  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3194  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3195  if (eos_at_start) {
3196  s->last_eos = 1;
3197  } else {
3198  s->eos = 1;
3199  }
3200  } else {
3201  eos_at_start = 0;
3202  }
3203  }
3204 
3205  /*
3206  * Check for RPU delimiter.
3207  *
3208  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3209  *
3210  * We have to do this check here and create the rpu buffer, since RPUs are appended
3211  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3212  */
3213  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3214  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3215  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3216  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3217  if (s->rpu_buf) {
3218  av_buffer_unref(&s->rpu_buf);
3219  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3220  }
3221 
3222  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3223  if (!s->rpu_buf)
3224  return AVERROR(ENOMEM);
3225  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3226 
3227  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3228  if (ret < 0) {
3229  av_buffer_unref(&s->rpu_buf);
3230  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3231  /* ignore */
3232  }
3233  }
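 /*
  * Illustrative note (assumption, not part of the original hevcdec.c text): the
  * probe above only inspects the last NAL of the split packet, so it relies on a
  * Dolby Vision access unit being laid out roughly as
  *
  *     [parameter sets] [prefix SEI] [VCL slice NALs ...] [unspec-62 RPU NAL]
  *
  * with the RPU travelling as the final NAL of the packet.
  */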
3234 
3235  /* decode the NAL units */
3236  for (i = 0; i < s->pkt.nb_nals; i++) {
3237  H2645NAL *nal = &s->pkt.nals[i];
3238 
3239  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3240  (s->avctx->skip_frame >= AVDISCARD_NONREF
3241  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3242  continue;
3243 
3244  ret = decode_nal_unit(s, nal);
3245  if (ret >= 0 && s->overlap > 2)
3246  ret = AVERROR_INVALIDDATA;
3247  if (ret < 0) {
3248  av_log(s->avctx, AV_LOG_WARNING,
3249  "Error parsing NAL unit #%d.\n", i);
3250  goto fail;
3251  }
3252  }
3253 
3254 fail:
3255  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3256  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3257 
3258  return ret;
3259 }
3260 
3261 static int verify_md5(HEVCContext *s, AVFrame *frame)
3262 {
3263  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3264  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3265  int pixel_shift;
3266  int err = 0;
3267  int i, j;
3268 
3269  if (!desc)
3270  return AVERROR(EINVAL);
3271 
3272  pixel_shift = desc->comp[0].depth > 8;
3273 
3274  /* the checksums are LE, so we have to byteswap for >8bpp formats
3275  * on BE arches */
3276 #if HAVE_BIGENDIAN
3277  if (pixel_shift && !s->checksum_buf) {
3278  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3279  FFMAX3(frame->linesize[0], frame->linesize[1],
3280  frame->linesize[2]));
3281  if (!s->checksum_buf)
3282  return AVERROR(ENOMEM);
3283  }
3284 #endif
3285 
3286  msg_buf[0] = '\0';
3287  for (i = 0; frame->data[i]; i++) {
3288  int width = s->avctx->coded_width;
3289  int height = s->avctx->coded_height;
3290  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3291  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3292  uint8_t md5[16];
3293 
3294  av_md5_init(s->md5_ctx);
3295  for (j = 0; j < h; j++) {
3296  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3297 #if HAVE_BIGENDIAN
3298  if (pixel_shift) {
3299  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3300  (const uint16_t *) src, w);
3301  src = s->checksum_buf;
3302  }
3303 #endif
3304  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3305  }
3306  av_md5_final(s->md5_ctx, md5);
3307 
3308 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3309 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
3310 
3311  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3312  av_strlcatf(msg_buf, sizeof(msg_buf),
3313  "plane %d - correct " MD5_PRI "; ",
3314  i, MD5_PRI_ARG(md5));
3315  } else {
3316  av_strlcatf(msg_buf, sizeof(msg_buf),
3317  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3318  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3319  err = AVERROR_INVALIDDATA;
3320  }
3321  }
3322 
3323  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3324  "Verifying checksum for frame with POC %d: %s\n",
3325  s->poc, msg_buf);
3326 
3327  return err;
3328 }
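 /*
  * A minimal caller-side sketch (assumption, not part of hevcdec.c): the MD5
  * verification above only runs when checksum checking is requested on the
  * decoder context, e.g.
  *
  *     avctx->err_recognition = AV_EF_CRCCHECK;                 // report mismatches
  *     avctx->err_recognition = AV_EF_CRCCHECK | AV_EF_EXPLODE; // and fail decoding
  *
  * or "-err_detect crccheck+explode" with the ffmpeg command-line tool.
  */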
3329 
3330 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3331 {
3332  int ret, i;
3333 
3334  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3335  &s->nal_length_size, s->avctx->err_recognition,
3336  s->apply_defdispwin, s->avctx);
3337  if (ret < 0)
3338  return ret;
3339 
3340  /* export stream parameters from the first SPS */
3341  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3342  if (first && s->ps.sps_list[i]) {
3343  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3344  export_stream_params(s, sps);
3345  break;
3346  }
3347  }
3348 
3349  /* export stream parameters from SEI */
3350  ret = export_stream_params_from_sei(s);
3351  if (ret < 0)
3352  return ret;
3353 
3354  return 0;
3355 }
3356 
3357 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3358  int *got_output, AVPacket *avpkt)
3359 {
3360  int ret;
3361  uint8_t *sd;
3362  size_t sd_size;
3363  HEVCContext *s = avctx->priv_data;
3364 
3365  if (!avpkt->size) {
3366  ret = ff_hevc_output_frame(s, rframe, 1);
3367  if (ret < 0)
3368  return ret;
3369 
3370  *got_output = ret;
3371  return 0;
3372  }
3373 
3374  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3375  if (sd && sd_size > 0) {
3376  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3377  if (ret < 0)
3378  return ret;
3379  }
3380 
3381  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3382  if (sd && sd_size > 0)
3383  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3384 
3385  s->ref = NULL;
3386  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3387  if (ret < 0)
3388  return ret;
3389 
3390  if (avctx->hwaccel) {
3391  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3392  av_log(avctx, AV_LOG_ERROR,
3393  "hardware accelerator failed to decode picture\n");
3394  ff_hevc_unref_frame(s, s->ref, ~0);
3395  return ret;
3396  }
3397  } else {
3398  /* verify the SEI checksum */
3399  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3400  s->sei.picture_hash.is_md5) {
3401  ret = verify_md5(s, s->ref->frame);
3402  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3403  ff_hevc_unref_frame(s, s->ref, ~0);
3404  return ret;
3405  }
3406  }
3407  }
3408  s->sei.picture_hash.is_md5 = 0;
3409 
3410  if (s->is_decoded) {
3411  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3412  s->is_decoded = 0;
3413  }
3414 
3415  if (s->output_frame->buf[0]) {
3416  av_frame_move_ref(rframe, s->output_frame);
3417  *got_output = 1;
3418  }
3419 
3420  return avpkt->size;
3421 }
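 /*
  * A minimal sketch of how the decode callback above is driven through the
  * public API (assumption, not part of hevcdec.c): "pkt" is expected to hold
  * one access unit in the same bitstream form (Annex B or hvcC) as the
  * extradata the decoder was opened with.
  */
 #if 0
 static int decode_one_au(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
 {
     int ret = avcodec_send_packet(avctx, pkt); /* ends up in hevc_decode_frame() */
     if (ret < 0)
         return ret;
     while ((ret = avcodec_receive_frame(avctx, frame)) >= 0)
         av_frame_unref(frame); /* consume frames delivered in output order */
     return ret == AVERROR(EAGAIN) ? 0 : ret;
 }
 #endif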
3422 
3423 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3424 {
3425  int ret;
3426 
3427  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3428  if (ret < 0)
3429  return ret;
3430 
3431  if (src->needs_fg) {
3432  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3433  if (ret < 0)
3434  return ret;
3435  dst->needs_fg = 1;
3436  }
3437 
3438  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3439  if (!dst->tab_mvf_buf)
3440  goto fail;
3441  dst->tab_mvf = src->tab_mvf;
3442 
3443  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3444  if (!dst->rpl_tab_buf)
3445  goto fail;
3446  dst->rpl_tab = src->rpl_tab;
3447 
3448  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3449  if (!dst->rpl_buf)
3450  goto fail;
3451 
3452  dst->poc = src->poc;
3453  dst->ctb_count = src->ctb_count;
3454  dst->flags = src->flags;
3455  dst->sequence = src->sequence;
3456 
3457  if (src->hwaccel_picture_private) {
3458  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3459  if (!dst->hwaccel_priv_buf)
3460  goto fail;
3461  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3462  }
3463 
3464  return 0;
3465 fail:
3466  ff_hevc_unref_frame(s, dst, ~0);
3467  return AVERROR(ENOMEM);
3468 }
3469 
3470 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3471 {
3472  HEVCContext *s = avctx->priv_data;
3473  int i;
3474 
3475  pic_arrays_free(s);
3476 
3477  ff_dovi_ctx_unref(&s->dovi_ctx);
3478  av_buffer_unref(&s->rpu_buf);
3479 
3480  av_freep(&s->md5_ctx);
3481 
3482  for (i = 0; i < 3; i++) {
3483  av_freep(&s->sao_pixel_buffer_h[i]);
3484  av_freep(&s->sao_pixel_buffer_v[i]);
3485  }
3486  av_frame_free(&s->output_frame);
3487 
3488  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3489  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3490  av_frame_free(&s->DPB[i].frame);
3491  av_frame_free(&s->DPB[i].frame_grain);
3492  }
3493 
3494  ff_hevc_ps_uninit(&s->ps);
3495 
3496  av_freep(&s->sh.entry_point_offset);
3497  av_freep(&s->sh.offset);
3498  av_freep(&s->sh.size);
3499 
3500  if (s->HEVClcList) {
3501  for (i = 1; i < s->threads_number; i++) {
3502  av_freep(&s->HEVClcList[i]);
3503  }
3504  }
3505  av_freep(&s->HEVClc);
3506  av_freep(&s->HEVClcList);
3507 
3508  ff_h2645_packet_uninit(&s->pkt);
3509 
3510  ff_hevc_reset_sei(&s->sei);
3511 
3512  return 0;
3513 }
3514 
3515 static av_cold int hevc_init_context(AVCodecContext *avctx)
3516 {
3517  HEVCContext *s = avctx->priv_data;
3518  int i;
3519 
3520  s->avctx = avctx;
3521 
3522  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3523  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3524  if (!s->HEVClc || !s->HEVClcList)
3525  return AVERROR(ENOMEM);
3526  s->HEVClc->parent = s;
3527  s->HEVClc->logctx = avctx;
3528  s->HEVClc->common_cabac_state = &s->cabac;
3529  s->HEVClcList[0] = s->HEVClc;
3530 
3531  s->output_frame = av_frame_alloc();
3532  if (!s->output_frame)
3533  return AVERROR(ENOMEM);
3534 
3535  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3536  s->DPB[i].frame = av_frame_alloc();
3537  if (!s->DPB[i].frame)
3538  return AVERROR(ENOMEM);
3539  s->DPB[i].tf.f = s->DPB[i].frame;
3540 
3541  s->DPB[i].frame_grain = av_frame_alloc();
3542  if (!s->DPB[i].frame_grain)
3543  return AVERROR(ENOMEM);
3544  }
3545 
3546  s->max_ra = INT_MAX;
3547 
3548  s->md5_ctx = av_md5_alloc();
3549  if (!s->md5_ctx)
3550  return AVERROR(ENOMEM);
3551 
3552  ff_bswapdsp_init(&s->bdsp);
3553 
3554  s->dovi_ctx.logctx = avctx;
3555  s->eos = 0;
3556 
3557  ff_hevc_reset_sei(&s->sei);
3558 
3559  return 0;
3560 }
3561 
3562 #if HAVE_THREADS
3563 static int hevc_update_thread_context(AVCodecContext *dst,
3564  const AVCodecContext *src)
3565 {
3566  HEVCContext *s = dst->priv_data;
3567  HEVCContext *s0 = src->priv_data;
3568  int i, ret;
3569 
3570  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3571  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3572  if (s0->DPB[i].frame->buf[0]) {
3573  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3574  if (ret < 0)
3575  return ret;
3576  }
3577  }
3578 
3579  if (s->ps.sps != s0->ps.sps)
3580  s->ps.sps = NULL;
3581  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3582  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3583  if (ret < 0)
3584  return ret;
3585  }
3586 
3587  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3588  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3589  if (ret < 0)
3590  return ret;
3591  }
3592 
3593  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3594  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3595  if (ret < 0)
3596  return ret;
3597  }
3598 
3599  if (s->ps.sps != s0->ps.sps)
3600  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3601  return ret;
3602 
3603  s->seq_decode = s0->seq_decode;
3604  s->seq_output = s0->seq_output;
3605  s->pocTid0 = s0->pocTid0;
3606  s->max_ra = s0->max_ra;
3607  s->eos = s0->eos;
3608  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3609 
3610  s->is_nalff = s0->is_nalff;
3611  s->nal_length_size = s0->nal_length_size;
3612 
3613  s->threads_number = s0->threads_number;
3614  s->threads_type = s0->threads_type;
3615 
3616  if (s0->eos) {
3617  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3618  s->max_ra = INT_MAX;
3619  }
3620 
3621  ret = ff_h2645_sei_ctx_replace(&s->sei.common, &s0->sei.common);
3622  if (ret < 0)
3623  return ret;
3624 
3625  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_plus.info,
3626  s0->sei.common.dynamic_hdr_plus.info);
3627  if (ret < 0)
3628  return ret;
3629 
3630  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3631  if (ret < 0)
3632  return ret;
3633 
3634  ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3635  if (ret < 0)
3636  return ret;
3637 
3638  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_vivid.info,
3639  s0->sei.common.dynamic_hdr_vivid.info);
3640  if (ret < 0)
3641  return ret;
3642 
3643  s->sei.common.frame_packing = s0->sei.common.frame_packing;
3644  s->sei.common.display_orientation = s0->sei.common.display_orientation;
3645  s->sei.common.alternative_transfer = s0->sei.common.alternative_transfer;
3646  s->sei.mastering_display = s0->sei.mastering_display;
3647  s->sei.content_light = s0->sei.content_light;
3648 
3649  ret = export_stream_params_from_sei(s);
3650  if (ret < 0)
3651  return ret;
3652 
3653  return 0;
3654 }
3655 #endif
3656 
3657 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3658 {
3659  HEVCContext *s = avctx->priv_data;
3660  int ret;
3661 
3662  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3663  s->threads_number = avctx->thread_count;
3664  ret = ff_slice_thread_init_progress(avctx);
3665  if (ret < 0)
3666  return ret;
3667  } else
3668  s->threads_number = 1;
3669 
3670  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3671  s->threads_type = FF_THREAD_FRAME;
3672  else
3673  s->threads_type = FF_THREAD_SLICE;
3674 
3675  ret = hevc_init_context(avctx);
3676  if (ret < 0)
3677  return ret;
3678 
3679  s->enable_parallel_tiles = 0;
3680  s->sei.picture_timing.picture_struct = 0;
3681  s->eos = 1;
3682 
3683  atomic_init(&s->wpp_err, 0);
3684 
3685  if (!avctx->internal->is_copy) {
3686  if (avctx->extradata_size > 0 && avctx->extradata) {
3687  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3688  if (ret < 0) {
3689  return ret;
3690  }
3691  }
3692  }
3693 
3694  return 0;
3695 }
3696 
3697 static void hevc_decode_flush(AVCodecContext *avctx)
3698 {
3699  HEVCContext *s = avctx->priv_data;
3700  ff_hevc_flush_dpb(s);
3701  ff_hevc_reset_sei(&s->sei);
3702  ff_dovi_ctx_flush(&s->dovi_ctx);
3703  av_buffer_unref(&s->rpu_buf);
3704  s->max_ra = INT_MAX;
3705  s->eos = 1;
3706 }
3707 
3708 #define OFFSET(x) offsetof(HEVCContext, x)
3709 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3710 
3711 static const AVOption options[] = {
3712  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3713  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3714  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3715  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3716  { NULL },
3717 };
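 /*
  * A minimal usage sketch for the private options above (assumption, not part
  * of hevcdec.c): they are set through the regular AVOption machinery before
  * the decoder is opened, e.g.
  *
  *     AVDictionary *opts = NULL;
  *     av_dict_set(&opts, "apply_defdispwin", "1", 0);
  *     ret = avcodec_open2(avctx, avcodec_find_decoder(AV_CODEC_ID_HEVC), &opts);
  *
  * or "-apply_defdispwin 1" placed before -i on the ffmpeg command line.
  */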
3718 
3719 static const AVClass hevc_decoder_class = {
3720  .class_name = "HEVC decoder",
3721  .item_name = av_default_item_name,
3722  .option = options,
3723  .version = LIBAVUTIL_VERSION_INT,
3724 };
3725 
3726 const FFCodec ff_hevc_decoder = {
3727  .p.name = "hevc",
3728  CODEC_LONG_NAME("HEVC (High Efficiency Video Coding)"),
3729  .p.type = AVMEDIA_TYPE_VIDEO,
3730  .p.id = AV_CODEC_ID_HEVC,
3731  .priv_data_size = sizeof(HEVCContext),
3732  .p.priv_class = &hevc_decoder_class,
3733  .init = hevc_decode_init,
3734  .close = hevc_decode_free,
3735  FF_CODEC_DECODE_CB(hevc_decode_frame),
3736  .flush = hevc_decode_flush,
3737  UPDATE_THREAD_CONTEXT(hevc_update_thread_context),
3738  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3739  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3740  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3741  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3742  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3743  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3744 #if CONFIG_HEVC_DXVA2_HWACCEL
3745  HWACCEL_DXVA2(hevc),
3746 #endif
3747 #if CONFIG_HEVC_D3D11VA_HWACCEL
3748  HWACCEL_D3D11VA(hevc),
3749 #endif
3750 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3751  HWACCEL_D3D11VA2(hevc),
3752 #endif
3753 #if CONFIG_HEVC_NVDEC_HWACCEL
3754  HWACCEL_NVDEC(hevc),
3755 #endif
3756 #if CONFIG_HEVC_VAAPI_HWACCEL
3757  HWACCEL_VAAPI(hevc),
3758 #endif
3759 #if CONFIG_HEVC_VDPAU_HWACCEL
3760  HWACCEL_VDPAU(hevc),
3761 #endif
3762 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3763  HWACCEL_VIDEOTOOLBOX(hevc),
3764 #endif
3765  NULL
3766  },
3767 };