FFmpeg
hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
29 #include "libavutil/film_grain_params.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mastering_display_metadata.h"
32 #include "libavutil/md5.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/stereo3d.h"
36 #include "libavutil/timecode.h"
37 
38 #include "bswapdsp.h"
39 #include "bytestream.h"
40 #include "cabac_functions.h"
41 #include "golomb.h"
42 #include "hevc.h"
43 #include "hevc_data.h"
44 #include "hevc_parse.h"
45 #include "hevcdec.h"
46 #include "hwconfig.h"
47 #include "profiles.h"
48 
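/* Maps a prediction block width (2..64) to the index of the matching qpel/epel
 * DSP function; widths that cannot occur are left at 0. */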
49 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
50 
51 /**
52  * NOTE: Each function hls_foo corresponds to the function foo in the
53  * specification (HLS stands for High Level Syntax).
54  */
55 
56 /**
57  * Section 5.7
58  */
59 
60 /* free everything allocated by pic_arrays_init() */
61 static void pic_arrays_free(HEVCContext *s)
62 {
63  av_freep(&s->sao);
64  av_freep(&s->deblock);
65 
66  av_freep(&s->skip_flag);
67  av_freep(&s->tab_ct_depth);
68 
69  av_freep(&s->tab_ipm);
70  av_freep(&s->cbf_luma);
71  av_freep(&s->is_pcm);
72 
73  av_freep(&s->qp_y_tab);
74  av_freep(&s->tab_slice_address);
75  av_freep(&s->filter_slice_edges);
76 
77  av_freep(&s->horizontal_bs);
78  av_freep(&s->vertical_bs);
79 
80  av_freep(&s->sh.entry_point_offset);
81  av_freep(&s->sh.size);
82  av_freep(&s->sh.offset);
83 
84  av_buffer_pool_uninit(&s->tab_mvf_pool);
85  av_buffer_pool_uninit(&s->rpl_tab_pool);
86 }
87 
88 /* allocate arrays that depend on frame dimensions */
89 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
90 {
91  int log2_min_cb_size = sps->log2_min_cb_size;
92  int width = sps->width;
93  int height = sps->height;
94  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
95  ((height >> log2_min_cb_size) + 1);
96  int ctb_count = sps->ctb_width * sps->ctb_height;
97  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
98 
99  s->bs_width = (width >> 2) + 1;
100  s->bs_height = (height >> 2) + 1;
101 
102  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
103  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
104  if (!s->sao || !s->deblock)
105  goto fail;
106 
107  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
108  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
109  if (!s->skip_flag || !s->tab_ct_depth)
110  goto fail;
111 
112  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
113  s->tab_ipm = av_mallocz(min_pu_size);
114  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
115  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
116  goto fail;
117 
118  s->filter_slice_edges = av_mallocz(ctb_count);
119  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
120  sizeof(*s->tab_slice_address));
121  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
122  sizeof(*s->qp_y_tab));
123  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
124  goto fail;
125 
126  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
127  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
128  if (!s->horizontal_bs || !s->vertical_bs)
129  goto fail;
130 
131  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
132                                        av_buffer_allocz);
133  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
134                                        av_buffer_allocz);
135  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
136  goto fail;
137 
138  return 0;
139 
140 fail:
141  pic_arrays_free(s);
142  return AVERROR(ENOMEM);
143 }
144 
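/* Parse pred_weight_table(): explicit luma/chroma weights and offsets used for
 * weighted uni- and bi-prediction over reference lists L0 and L1. */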
145 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
146 {
147  int i = 0;
148  int j = 0;
149  uint8_t luma_weight_l0_flag[16];
150  uint8_t chroma_weight_l0_flag[16];
151  uint8_t luma_weight_l1_flag[16];
152  uint8_t chroma_weight_l1_flag[16];
153  int luma_log2_weight_denom;
154 
155  luma_log2_weight_denom = get_ue_golomb_long(gb);
156  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
157  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
158  return AVERROR_INVALIDDATA;
159  }
160  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
161  if (s->ps.sps->chroma_format_idc != 0) {
162  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
163  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
168  }
169 
170  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
171  luma_weight_l0_flag[i] = get_bits1(gb);
172  if (!luma_weight_l0_flag[i]) {
173  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
174  s->sh.luma_offset_l0[i] = 0;
175  }
176  }
177  if (s->ps.sps->chroma_format_idc != 0) {
178  for (i = 0; i < s->sh.nb_refs[L0]; i++)
179  chroma_weight_l0_flag[i] = get_bits1(gb);
180  } else {
181  for (i = 0; i < s->sh.nb_refs[L0]; i++)
182  chroma_weight_l0_flag[i] = 0;
183  }
184  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
185  if (luma_weight_l0_flag[i]) {
186  int delta_luma_weight_l0 = get_se_golomb(gb);
187  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
188  return AVERROR_INVALIDDATA;
189  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
190  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
191  }
192  if (chroma_weight_l0_flag[i]) {
193  for (j = 0; j < 2; j++) {
194  int delta_chroma_weight_l0 = get_se_golomb(gb);
195  int delta_chroma_offset_l0 = get_se_golomb(gb);
196 
197  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
198  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
199  return AVERROR_INVALIDDATA;
200  }
201 
202  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
203  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
204  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
205  }
206  } else {
207  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
208  s->sh.chroma_offset_l0[i][0] = 0;
209  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
210  s->sh.chroma_offset_l0[i][1] = 0;
211  }
212  }
213  if (s->sh.slice_type == HEVC_SLICE_B) {
214  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
215  luma_weight_l1_flag[i] = get_bits1(gb);
216  if (!luma_weight_l1_flag[i]) {
217  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
218  s->sh.luma_offset_l1[i] = 0;
219  }
220  }
221  if (s->ps.sps->chroma_format_idc != 0) {
222  for (i = 0; i < s->sh.nb_refs[L1]; i++)
223  chroma_weight_l1_flag[i] = get_bits1(gb);
224  } else {
225  for (i = 0; i < s->sh.nb_refs[L1]; i++)
226  chroma_weight_l1_flag[i] = 0;
227  }
228  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
229  if (luma_weight_l1_flag[i]) {
230  int delta_luma_weight_l1 = get_se_golomb(gb);
231  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
232  return AVERROR_INVALIDDATA;
233  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
234  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
235  }
236  if (chroma_weight_l1_flag[i]) {
237  for (j = 0; j < 2; j++) {
238  int delta_chroma_weight_l1 = get_se_golomb(gb);
239  int delta_chroma_offset_l1 = get_se_golomb(gb);
240 
241  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
242  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
243  return AVERROR_INVALIDDATA;
244  }
245 
246  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
247  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
248  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
249  }
250  } else {
251  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
252  s->sh.chroma_offset_l1[i][0] = 0;
253  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
254  s->sh.chroma_offset_l1[i][1] = 0;
255  }
256  }
257  }
258  return 0;
259 }
260 
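/* Parse the long-term reference picture set signalled in the slice header,
 * combining SPS-signalled and slice-signalled long-term pictures. */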
261 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
262 {
263  const HEVCSPS *sps = s->ps.sps;
264  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
265  int prev_delta_msb = 0;
266  unsigned int nb_sps = 0, nb_sh;
267  int i;
268 
269  rps->nb_refs = 0;
270  if (!sps->long_term_ref_pics_present_flag)
271  return 0;
272 
273  if (sps->num_long_term_ref_pics_sps > 0)
274  nb_sps = get_ue_golomb_long(gb);
275  nb_sh = get_ue_golomb_long(gb);
276 
277  if (nb_sps > sps->num_long_term_ref_pics_sps)
278  return AVERROR_INVALIDDATA;
279  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
280  return AVERROR_INVALIDDATA;
281 
282  rps->nb_refs = nb_sh + nb_sps;
283 
284  for (i = 0; i < rps->nb_refs; i++) {
285 
286  if (i < nb_sps) {
287  uint8_t lt_idx_sps = 0;
288 
289  if (sps->num_long_term_ref_pics_sps > 1)
290  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
291 
292  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
293  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
294  } else {
295  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
296  rps->used[i] = get_bits1(gb);
297  }
298 
299  rps->poc_msb_present[i] = get_bits1(gb);
300  if (rps->poc_msb_present[i]) {
301  int64_t delta = get_ue_golomb_long(gb);
302  int64_t poc;
303 
304  if (i && i != nb_sps)
305  delta += prev_delta_msb;
306 
307  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
308  if (poc != (int32_t)poc)
309  return AVERROR_INVALIDDATA;
310  rps->poc[i] = poc;
311  prev_delta_msb = delta;
312  }
313  }
314 
315  return 0;
316 }
317 
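/* Propagate SPS/VPS-derived stream parameters (dimensions, profile/level,
 * colour properties, timing) to the AVCodecContext. */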
318 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
319 {
320  AVCodecContext *avctx = s->avctx;
321  const HEVCParamSets *ps = &s->ps;
322  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
323  const HEVCWindow *ow = &sps->output_window;
324  unsigned int num = 0, den = 0;
325 
326  avctx->pix_fmt = sps->pix_fmt;
327  avctx->coded_width = sps->width;
328  avctx->coded_height = sps->height;
329  avctx->width = sps->width - ow->left_offset - ow->right_offset;
330  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
331  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
332  avctx->profile = sps->ptl.general_ptl.profile_idc;
333  avctx->level = sps->ptl.general_ptl.level_idc;
334 
335  ff_set_sar(avctx, sps->vui.sar);
336 
337  if (sps->vui.video_signal_type_present_flag)
338  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
339                                                       : AVCOL_RANGE_MPEG;
340  else
341  avctx->color_range = AVCOL_RANGE_MPEG;
342 
343  if (sps->vui.colour_description_present_flag) {
344  avctx->color_primaries = sps->vui.colour_primaries;
345  avctx->color_trc = sps->vui.transfer_characteristic;
346  avctx->colorspace = sps->vui.matrix_coeffs;
347  } else {
348  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
349  avctx->color_trc       = AVCOL_TRC_UNSPECIFIED;
350  avctx->colorspace      = AVCOL_SPC_UNSPECIFIED;
351  }
352 
353  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
354  if (sps->chroma_format_idc == 1) {
355  if (sps->vui.chroma_loc_info_present_flag) {
356  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
357  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
358  } else
359  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
360  }
361 
362  if (vps->vps_timing_info_present_flag) {
363  num = vps->vps_num_units_in_tick;
364  den = vps->vps_time_scale;
365  } else if (sps->vui.vui_timing_info_present_flag) {
366  num = sps->vui.vui_num_units_in_tick;
367  den = sps->vui.vui_time_scale;
368  }
369 
370  if (num != 0 && den != 0)
371  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
372  num, den, 1 << 30);
373 }
374 
375 static int export_stream_params_from_sei(HEVCContext *s)
376 {
377  AVCodecContext *avctx = s->avctx;
378 
379  if (s->sei.a53_caption.buf_ref)
380  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
381 
382  if (s->sei.alternative_transfer.present &&
383  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
384  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
385  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
386  }
387 
388  if (s->sei.film_grain_characteristics.present)
389  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
390 
391  return 0;
392 }
393 
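/* Build the list of acceptable output pixel formats, hardware accelerations
 * first and the native software format last, then let ff_thread_get_format()
 * pick one. */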
394 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
395 {
396 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
397  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
398  CONFIG_HEVC_NVDEC_HWACCEL + \
399  CONFIG_HEVC_VAAPI_HWACCEL + \
400  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
401  CONFIG_HEVC_VDPAU_HWACCEL)
402  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
403 
404  switch (sps->pix_fmt) {
405  case AV_PIX_FMT_YUV420P:
406  case AV_PIX_FMT_YUVJ420P:
407 #if CONFIG_HEVC_DXVA2_HWACCEL
408  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
409 #endif
410 #if CONFIG_HEVC_D3D11VA_HWACCEL
411  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
412  *fmt++ = AV_PIX_FMT_D3D11;
413 #endif
414 #if CONFIG_HEVC_VAAPI_HWACCEL
415  *fmt++ = AV_PIX_FMT_VAAPI;
416 #endif
417 #if CONFIG_HEVC_VDPAU_HWACCEL
418  *fmt++ = AV_PIX_FMT_VDPAU;
419 #endif
420 #if CONFIG_HEVC_NVDEC_HWACCEL
421  *fmt++ = AV_PIX_FMT_CUDA;
422 #endif
423 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
424  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
425 #endif
426  break;
427  case AV_PIX_FMT_YUV420P10:
428 #if CONFIG_HEVC_DXVA2_HWACCEL
429  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
430 #endif
431 #if CONFIG_HEVC_D3D11VA_HWACCEL
432  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
433  *fmt++ = AV_PIX_FMT_D3D11;
434 #endif
435 #if CONFIG_HEVC_VAAPI_HWACCEL
436  *fmt++ = AV_PIX_FMT_VAAPI;
437 #endif
438 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
439  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
440 #endif
441 #if CONFIG_HEVC_VDPAU_HWACCEL
442  *fmt++ = AV_PIX_FMT_VDPAU;
443 #endif
444 #if CONFIG_HEVC_NVDEC_HWACCEL
445  *fmt++ = AV_PIX_FMT_CUDA;
446 #endif
447  break;
448  case AV_PIX_FMT_YUV444P:
449 #if CONFIG_HEVC_VDPAU_HWACCEL
450  *fmt++ = AV_PIX_FMT_VDPAU;
451 #endif
452 #if CONFIG_HEVC_NVDEC_HWACCEL
453  *fmt++ = AV_PIX_FMT_CUDA;
454 #endif
455  break;
456  case AV_PIX_FMT_YUV422P:
457  case AV_PIX_FMT_YUV422P10LE:
458 #if CONFIG_HEVC_VAAPI_HWACCEL
459  *fmt++ = AV_PIX_FMT_VAAPI;
460 #endif
461  break;
462  case AV_PIX_FMT_YUV420P12:
463  case AV_PIX_FMT_YUV444P10:
464  case AV_PIX_FMT_YUV444P12:
465 #if CONFIG_HEVC_VDPAU_HWACCEL
466  *fmt++ = AV_PIX_FMT_VDPAU;
467 #endif
468 #if CONFIG_HEVC_NVDEC_HWACCEL
469  *fmt++ = AV_PIX_FMT_CUDA;
470 #endif
471  break;
472  }
473 
474  *fmt++ = sps->pix_fmt;
475  *fmt = AV_PIX_FMT_NONE;
476 
477  return ff_thread_get_format(s->avctx, pix_fmts);
478 }
479 
480 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
481  enum AVPixelFormat pix_fmt)
482 {
483  int ret, i;
484 
485  pic_arrays_free(s);
486  s->ps.sps = NULL;
487  s->ps.vps = NULL;
488 
489  if (!sps)
490  return 0;
491 
492  ret = pic_arrays_init(s, sps);
493  if (ret < 0)
494  goto fail;
495 
496  export_stream_params(s, sps);
497 
498  s->avctx->pix_fmt = pix_fmt;
499 
500  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
501  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
502  ff_videodsp_init (&s->vdsp, sps->bit_depth);
503 
504  for (i = 0; i < 3; i++) {
505  av_freep(&s->sao_pixel_buffer_h[i]);
506  av_freep(&s->sao_pixel_buffer_v[i]);
507  }
508 
509  if (sps->sao_enabled && !s->avctx->hwaccel) {
510  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
511  int c_idx;
512 
513  for(c_idx = 0; c_idx < c_count; c_idx++) {
514  int w = sps->width >> sps->hshift[c_idx];
515  int h = sps->height >> sps->vshift[c_idx];
516  s->sao_pixel_buffer_h[c_idx] =
517  av_malloc((w * 2 * sps->ctb_height) <<
518  sps->pixel_shift);
519  s->sao_pixel_buffer_v[c_idx] =
520  av_malloc((h * 2 * sps->ctb_width) <<
521  sps->pixel_shift);
522  if (!s->sao_pixel_buffer_h[c_idx] ||
523  !s->sao_pixel_buffer_v[c_idx])
524  goto fail;
525  }
526  }
527 
528  s->ps.sps = sps;
529  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
530 
531  return 0;
532 
533 fail:
534  pic_arrays_free(s);
535  for (i = 0; i < 3; i++) {
536  av_freep(&s->sao_pixel_buffer_h[i]);
537  av_freep(&s->sao_pixel_buffer_v[i]);
538  }
539  s->ps.sps = NULL;
540  return ret;
541 }
542 
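/* Parse the slice segment header (slice_segment_header() in the HEVC
 * specification) and derive the decoder-side slice state. */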
543 static int hls_slice_header(HEVCContext *s)
544 {
545  GetBitContext *gb = &s->HEVClc->gb;
546  SliceHeader *sh = &s->sh;
547  int i, ret;
548 
549  // Coded parameters
550  sh->first_slice_in_pic_flag = get_bits1(gb);
551  if (s->ref && sh->first_slice_in_pic_flag) {
552  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
553  return 1; // This slice will be skipped later, do not corrupt state
554  }
555 
556  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
557  s->seq_decode = (s->seq_decode + 1) & 0xff;
558  s->max_ra = INT_MAX;
559  if (IS_IDR(s))
560  ff_hevc_clear_refs(s);
561  }
562  sh->no_output_of_prior_pics_flag = 0;
563  if (IS_IRAP(s))
564  sh->no_output_of_prior_pics_flag = get_bits1(gb);
565 
566  sh->pps_id = get_ue_golomb_long(gb);
567  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
568  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
569  return AVERROR_INVALIDDATA;
570  }
571  if (!sh->first_slice_in_pic_flag &&
572  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
573  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
574  return AVERROR_INVALIDDATA;
575  }
576  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
577  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
578  sh->no_output_of_prior_pics_flag = 1;
579 
580  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
581  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
582  const HEVCSPS *last_sps = s->ps.sps;
583  enum AVPixelFormat pix_fmt;
584 
585  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
586  if (sps->width != last_sps->width || sps->height != last_sps->height ||
587  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
588  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
589  sh->no_output_of_prior_pics_flag = 0;
590  }
591  ff_hevc_clear_refs(s);
592 
593  ret = set_sps(s, sps, sps->pix_fmt);
594  if (ret < 0)
595  return ret;
596 
597  pix_fmt = get_format(s, sps);
598  if (pix_fmt < 0)
599  return pix_fmt;
600  s->avctx->pix_fmt = pix_fmt;
601 
602  s->seq_decode = (s->seq_decode + 1) & 0xff;
603  s->max_ra = INT_MAX;
604  }
605 
606  ret = export_stream_params_from_sei(s);
607  if (ret < 0)
608  return ret;
609 
610  sh->dependent_slice_segment_flag = 0;
611  if (!sh->first_slice_in_pic_flag) {
612  int slice_address_length;
613 
614  if (s->ps.pps->dependent_slice_segments_enabled_flag)
615  sh->dependent_slice_segment_flag = get_bits1(gb);
616 
617  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
618  s->ps.sps->ctb_height);
619  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
620  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
621  av_log(s->avctx, AV_LOG_ERROR,
622  "Invalid slice segment address: %u.\n",
623  sh->slice_segment_addr);
624  return AVERROR_INVALIDDATA;
625  }
626 
627  if (!sh->dependent_slice_segment_flag) {
628  sh->slice_addr = sh->slice_segment_addr;
629  s->slice_idx++;
630  }
631  } else {
632  sh->slice_segment_addr = sh->slice_addr = 0;
633  s->slice_idx = 0;
634  s->slice_initialized = 0;
635  }
636 
637  if (!sh->dependent_slice_segment_flag) {
638  s->slice_initialized = 0;
639 
640  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
641  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
642 
643  sh->slice_type = get_ue_golomb_long(gb);
644  if (!(sh->slice_type == HEVC_SLICE_I ||
645  sh->slice_type == HEVC_SLICE_P ||
646  sh->slice_type == HEVC_SLICE_B)) {
647  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
648  sh->slice_type);
649  return AVERROR_INVALIDDATA;
650  }
651  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
652  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
653  return AVERROR_INVALIDDATA;
654  }
655 
656  // when flag is not present, picture is inferred to be output
657  sh->pic_output_flag = 1;
658  if (s->ps.pps->output_flag_present_flag)
659  sh->pic_output_flag = get_bits1(gb);
660 
661  if (s->ps.sps->separate_colour_plane_flag)
662  sh->colour_plane_id = get_bits(gb, 2);
663 
664  if (!IS_IDR(s)) {
665  int poc, pos;
666 
667  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
668  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
669  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
670  av_log(s->avctx, AV_LOG_WARNING,
671  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
672  if (s->avctx->err_recognition & AV_EF_EXPLODE)
673  return AVERROR_INVALIDDATA;
674  poc = s->poc;
675  }
676  s->poc = poc;
677 
678  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
679  pos = get_bits_left(gb);
680  if (!sh->short_term_ref_pic_set_sps_flag) {
681  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
682  if (ret < 0)
683  return ret;
684 
685  sh->short_term_rps = &sh->slice_rps;
686  } else {
687  int numbits, rps_idx;
688 
689  if (!s->ps.sps->nb_st_rps) {
690  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
691  return AVERROR_INVALIDDATA;
692  }
693 
694  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
695  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
696  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
697  }
698  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
699 
700  pos = get_bits_left(gb);
701  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
702  if (ret < 0) {
703  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
704  if (s->avctx->err_recognition & AV_EF_EXPLODE)
705  return AVERROR_INVALIDDATA;
706  }
707  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
708 
709  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
710  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
711  else
712  sh->slice_temporal_mvp_enabled_flag = 0;
713  } else {
714  s->sh.short_term_rps = NULL;
715  s->poc = 0;
716  }
717 
718  /* 8.3.1 */
719  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
720  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
721  s->nal_unit_type != HEVC_NAL_TSA_N &&
722  s->nal_unit_type != HEVC_NAL_STSA_N &&
723  s->nal_unit_type != HEVC_NAL_RADL_N &&
724  s->nal_unit_type != HEVC_NAL_RADL_R &&
725  s->nal_unit_type != HEVC_NAL_RASL_N &&
726  s->nal_unit_type != HEVC_NAL_RASL_R)
727  s->pocTid0 = s->poc;
728 
729  if (s->ps.sps->sao_enabled) {
730  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
731  if (s->ps.sps->chroma_format_idc) {
732  sh->slice_sample_adaptive_offset_flag[1] =
733  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
734  }
735  } else {
736  sh->slice_sample_adaptive_offset_flag[0] = 0;
737  sh->slice_sample_adaptive_offset_flag[1] = 0;
738  sh->slice_sample_adaptive_offset_flag[2] = 0;
739  }
740 
741  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
742  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
743  int nb_refs;
744 
745  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
746  if (sh->slice_type == HEVC_SLICE_B)
747  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
748 
749  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
750  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
751  if (sh->slice_type == HEVC_SLICE_B)
752  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
753  }
754  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
755  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
756  sh->nb_refs[L0], sh->nb_refs[L1]);
757  return AVERROR_INVALIDDATA;
758  }
759 
760  sh->rpl_modification_flag[0] = 0;
761  sh->rpl_modification_flag[1] = 0;
762  nb_refs = ff_hevc_frame_nb_refs(s);
763  if (!nb_refs) {
764  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
765  return AVERROR_INVALIDDATA;
766  }
767 
768  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
769  sh->rpl_modification_flag[0] = get_bits1(gb);
770  if (sh->rpl_modification_flag[0]) {
771  for (i = 0; i < sh->nb_refs[L0]; i++)
772  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
773  }
774 
775  if (sh->slice_type == HEVC_SLICE_B) {
776  sh->rpl_modification_flag[1] = get_bits1(gb);
777  if (sh->rpl_modification_flag[1] == 1)
778  for (i = 0; i < sh->nb_refs[L1]; i++)
779  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
780  }
781  }
782 
783  if (sh->slice_type == HEVC_SLICE_B)
784  sh->mvd_l1_zero_flag = get_bits1(gb);
785 
786  if (s->ps.pps->cabac_init_present_flag)
787  sh->cabac_init_flag = get_bits1(gb);
788  else
789  sh->cabac_init_flag = 0;
790 
791  sh->collocated_ref_idx = 0;
792  if (sh->slice_temporal_mvp_enabled_flag) {
793  sh->collocated_list = L0;
794  if (sh->slice_type == HEVC_SLICE_B)
795  sh->collocated_list = !get_bits1(gb);
796 
797  if (sh->nb_refs[sh->collocated_list] > 1) {
798  sh->collocated_ref_idx = get_ue_golomb_long(gb);
799  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
800  av_log(s->avctx, AV_LOG_ERROR,
801  "Invalid collocated_ref_idx: %d.\n",
802  sh->collocated_ref_idx);
803  return AVERROR_INVALIDDATA;
804  }
805  }
806  }
807 
808  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
809  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
810  int ret = pred_weight_table(s, gb);
811  if (ret < 0)
812  return ret;
813  }
814 
815  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
816  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
817  av_log(s->avctx, AV_LOG_ERROR,
818  "Invalid number of merging MVP candidates: %d.\n",
819  sh->max_num_merge_cand);
820  return AVERROR_INVALIDDATA;
821  }
822  }
823 
824  sh->slice_qp_delta = get_se_golomb(gb);
825 
826  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
827  sh->slice_cb_qp_offset = get_se_golomb(gb);
828  sh->slice_cr_qp_offset = get_se_golomb(gb);
829  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
830  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
831  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
832  return AVERROR_INVALIDDATA;
833  }
834  } else {
835  sh->slice_cb_qp_offset = 0;
836  sh->slice_cr_qp_offset = 0;
837  }
838 
839  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
840  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
841  else
842  sh->cu_chroma_qp_offset_enabled_flag = 0;
843 
844  if (s->ps.pps->deblocking_filter_control_present_flag) {
845  int deblocking_filter_override_flag = 0;
846 
847  if (s->ps.pps->deblocking_filter_override_enabled_flag)
848  deblocking_filter_override_flag = get_bits1(gb);
849 
850  if (deblocking_filter_override_flag) {
851  sh->disable_deblocking_filter_flag = get_bits1(gb);
852  if (!sh->disable_deblocking_filter_flag) {
853  int beta_offset_div2 = get_se_golomb(gb);
854  int tc_offset_div2 = get_se_golomb(gb) ;
855  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
856  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
857  av_log(s->avctx, AV_LOG_ERROR,
858  "Invalid deblock filter offsets: %d, %d\n",
859  beta_offset_div2, tc_offset_div2);
860  return AVERROR_INVALIDDATA;
861  }
862  sh->beta_offset = beta_offset_div2 * 2;
863  sh->tc_offset = tc_offset_div2 * 2;
864  }
865  } else {
866  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
867  sh->beta_offset = s->ps.pps->beta_offset;
868  sh->tc_offset = s->ps.pps->tc_offset;
869  }
870  } else {
871  sh->disable_deblocking_filter_flag = 0;
872  sh->beta_offset = 0;
873  sh->tc_offset = 0;
874  }
875 
876  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
877  (sh->slice_sample_adaptive_offset_flag[0] ||
878  sh->slice_sample_adaptive_offset_flag[1] ||
879  !sh->disable_deblocking_filter_flag)) {
880  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
881  } else {
882  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
883  }
884  } else if (!s->slice_initialized) {
885  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
886  return AVERROR_INVALIDDATA;
887  }
888 
889  sh->num_entry_point_offsets = 0;
890  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
891  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
892  // It would be possible to bound this tighter but this here is simpler
893  if (num_entry_point_offsets > get_bits_left(gb)) {
894  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
895  return AVERROR_INVALIDDATA;
896  }
897 
898  sh->num_entry_point_offsets = num_entry_point_offsets;
899  if (sh->num_entry_point_offsets > 0) {
900  int offset_len = get_ue_golomb_long(gb) + 1;
901 
902  if (offset_len < 1 || offset_len > 32) {
903  sh->num_entry_point_offsets = 0;
904  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
905  return AVERROR_INVALIDDATA;
906  }
907 
908  av_freep(&sh->entry_point_offset);
909  av_freep(&sh->offset);
910  av_freep(&sh->size);
911  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
912  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
913  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
914  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
915  sh->num_entry_point_offsets = 0;
916  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
917  return AVERROR(ENOMEM);
918  }
919  for (i = 0; i < sh->num_entry_point_offsets; i++) {
920  unsigned val = get_bits_long(gb, offset_len);
921  sh->entry_point_offset[i] = val + 1; // +1 to get the size
922  }
923  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
924  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
925  s->threads_number = 1;
926  } else
927  s->enable_parallel_tiles = 0;
928  } else
929  s->enable_parallel_tiles = 0;
930  }
931 
932  if (s->ps.pps->slice_header_extension_present_flag) {
933  unsigned int length = get_ue_golomb_long(gb);
934  if (length*8LL > get_bits_left(gb)) {
935  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
936  return AVERROR_INVALIDDATA;
937  }
938  for (i = 0; i < length; i++)
939  skip_bits(gb, 8); // slice_header_extension_data_byte
940  }
941 
942  // Inferred parameters
943  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
944  if (sh->slice_qp > 51 ||
945  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
946  av_log(s->avctx, AV_LOG_ERROR,
947  "The slice_qp %d is outside the valid range "
948  "[%d, 51].\n",
949  sh->slice_qp,
950  -s->ps.sps->qp_bd_offset);
951  return AVERROR_INVALIDDATA;
952  }
953 
954  s->sh.slice_ctb_addr_rs = s->sh.slice_segment_addr;
955 
956  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
957  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
958  return AVERROR_INVALIDDATA;
959  }
960 
961  if (get_bits_left(gb) < 0) {
962  av_log(s->avctx, AV_LOG_ERROR,
963  "Overread slice header by %d bits\n", -get_bits_left(gb));
964  return AVERROR_INVALIDDATA;
965  }
966 
967  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
968 
969  if (!s->ps.pps->cu_qp_delta_enabled_flag)
970  s->HEVClc->qp_y = s->sh.slice_qp;
971 
972  s->slice_initialized = 1;
973  s->HEVClc->tu.cu_qp_offset_cb = 0;
974  s->HEVClc->tu.cu_qp_offset_cr = 0;
975 
976  return 0;
977 }
978 
979 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
980 
981 #define SET_SAO(elem, value) \
982 do { \
983  if (!sao_merge_up_flag && !sao_merge_left_flag) \
984  sao->elem = value; \
985  else if (sao_merge_left_flag) \
986  sao->elem = CTB(s->sao, rx-1, ry).elem; \
987  else if (sao_merge_up_flag) \
988  sao->elem = CTB(s->sao, rx, ry-1).elem; \
989  else \
990  sao->elem = 0; \
991 } while (0)
992 
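/* Parse the SAO parameters for the CTB at (rx, ry); SET_SAO above inherits a
 * value from the left or above CTB when the corresponding merge flag is set. */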
993 static void hls_sao_param(HEVCContext *s, int rx, int ry)
994 {
995  HEVCLocalContext *lc = s->HEVClc;
996  int sao_merge_left_flag = 0;
997  int sao_merge_up_flag = 0;
998  SAOParams *sao = &CTB(s->sao, rx, ry);
999  int c_idx, i;
1000 
1001  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1002  s->sh.slice_sample_adaptive_offset_flag[1]) {
1003  if (rx > 0) {
1004  if (lc->ctb_left_flag)
1005  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
1006  }
1007  if (ry > 0 && !sao_merge_left_flag) {
1008  if (lc->ctb_up_flag)
1009  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
1010  }
1011  }
1012 
1013  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1014  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1015  s->ps.pps->log2_sao_offset_scale_chroma;
1016 
1017  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1018  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1019  continue;
1020  }
1021 
1022  if (c_idx == 2) {
1023  sao->type_idx[2] = sao->type_idx[1];
1024  sao->eo_class[2] = sao->eo_class[1];
1025  } else {
1026  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
1027  }
1028 
1029  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1030  continue;
1031 
1032  for (i = 0; i < 4; i++)
1033  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
1034 
1035  if (sao->type_idx[c_idx] == SAO_BAND) {
1036  for (i = 0; i < 4; i++) {
1037  if (sao->offset_abs[c_idx][i]) {
1038  SET_SAO(offset_sign[c_idx][i],
1039  ff_hevc_sao_offset_sign_decode(s));
1040  } else {
1041  sao->offset_sign[c_idx][i] = 0;
1042  }
1043  }
1044  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
1045  } else if (c_idx != 2) {
1046  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1047  }
1048 
1049  // Inferred parameters
1050  sao->offset_val[c_idx][0] = 0;
1051  for (i = 0; i < 4; i++) {
1052  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1053  if (sao->type_idx[c_idx] == SAO_EDGE) {
1054  if (i > 1)
1055  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1056  } else if (sao->offset_sign[c_idx][i]) {
1057  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1058  }
1059  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1060  }
1061  }
1062 }
1063 
1064 #undef SET_SAO
1065 #undef CTB
1066 
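/* Parse the cross-component prediction scale (log2_res_scale_abs_plus1 and its
 * sign) used to derive a chroma residual from the luma residual. */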
1067 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1068  HEVCLocalContext *lc = s->HEVClc;
1069  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1070 
1071  if (log2_res_scale_abs_plus1 != 0) {
1072  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1073  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1074  (1 - 2 * res_scale_sign_flag);
1075  } else {
1076  lc->tu.res_scale_val = 0;
1077  }
1078 
1079 
1080  return 0;
1081 }
1082 
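/* Reconstruct one transform unit: run intra prediction where needed, parse the
 * CU QP delta and chroma QP offsets, then decode the luma and chroma residuals
 * (applying cross-component prediction when enabled). */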
1083 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1084  int xBase, int yBase, int cb_xBase, int cb_yBase,
1085  int log2_cb_size, int log2_trafo_size,
1086  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1087 {
1088  HEVCLocalContext *lc = s->HEVClc;
1089  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1090  int i;
1091 
1092  if (lc->cu.pred_mode == MODE_INTRA) {
1093  int trafo_size = 1 << log2_trafo_size;
1094  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1095 
1096  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1097  }
1098 
1099  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1100  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1101  int scan_idx = SCAN_DIAG;
1102  int scan_idx_c = SCAN_DIAG;
1103  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1104  (s->ps.sps->chroma_format_idc == 2 &&
1105  (cbf_cb[1] || cbf_cr[1]));
1106 
1107  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1108  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1109  if (lc->tu.cu_qp_delta != 0)
1110  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1111  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1112  lc->tu.is_cu_qp_delta_coded = 1;
1113 
1114  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1115  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1116  av_log(s->avctx, AV_LOG_ERROR,
1117  "The cu_qp_delta %d is outside the valid range "
1118  "[%d, %d].\n",
1119  lc->tu.cu_qp_delta,
1120  -(26 + s->ps.sps->qp_bd_offset / 2),
1121  (25 + s->ps.sps->qp_bd_offset / 2));
1122  return AVERROR_INVALIDDATA;
1123  }
1124 
1125  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1126  }
1127 
1128  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1129  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1130  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1131  if (cu_chroma_qp_offset_flag) {
1132  int cu_chroma_qp_offset_idx = 0;
1133  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1134  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1135  av_log(s->avctx, AV_LOG_ERROR,
1136  "cu_chroma_qp_offset_idx not yet tested.\n");
1137  }
1138  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1139  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1140  } else {
1141  lc->tu.cu_qp_offset_cb = 0;
1142  lc->tu.cu_qp_offset_cr = 0;
1143  }
1144  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1145  }
1146 
1147  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1148  if (lc->tu.intra_pred_mode >= 6 &&
1149  lc->tu.intra_pred_mode <= 14) {
1150  scan_idx = SCAN_VERT;
1151  } else if (lc->tu.intra_pred_mode >= 22 &&
1152  lc->tu.intra_pred_mode <= 30) {
1153  scan_idx = SCAN_HORIZ;
1154  }
1155 
1156  if (lc->tu.intra_pred_mode_c >= 6 &&
1157  lc->tu.intra_pred_mode_c <= 14) {
1158  scan_idx_c = SCAN_VERT;
1159  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1160  lc->tu.intra_pred_mode_c <= 30) {
1161  scan_idx_c = SCAN_HORIZ;
1162  }
1163  }
1164 
1165  lc->tu.cross_pf = 0;
1166 
1167  if (cbf_luma)
1168  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1169  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1170  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1171  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1172  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1173  (lc->cu.pred_mode == MODE_INTER ||
1174  (lc->tu.chroma_mode_c == 4)));
1175 
1176  if (lc->tu.cross_pf) {
1177  hls_cross_component_pred(s, 0);
1178  }
1179  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1180  if (lc->cu.pred_mode == MODE_INTRA) {
1181  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1182  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1183  }
1184  if (cbf_cb[i])
1185  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1186  log2_trafo_size_c, scan_idx_c, 1);
1187  else
1188  if (lc->tu.cross_pf) {
1189  ptrdiff_t stride = s->frame->linesize[1];
1190  int hshift = s->ps.sps->hshift[1];
1191  int vshift = s->ps.sps->vshift[1];
1192  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1193  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1194  int size = 1 << log2_trafo_size_c;
1195 
1196  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1197  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1198  for (i = 0; i < (size * size); i++) {
1199  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1200  }
1201  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1202  }
1203  }
1204 
1205  if (lc->tu.cross_pf) {
1206  hls_cross_component_pred(s, 1);
1207  }
1208  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1209  if (lc->cu.pred_mode == MODE_INTRA) {
1210  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1211  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1212  }
1213  if (cbf_cr[i])
1214  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1215  log2_trafo_size_c, scan_idx_c, 2);
1216  else
1217  if (lc->tu.cross_pf) {
1218  ptrdiff_t stride = s->frame->linesize[2];
1219  int hshift = s->ps.sps->hshift[2];
1220  int vshift = s->ps.sps->vshift[2];
1221  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1222  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1223  int size = 1 << log2_trafo_size_c;
1224 
1225  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1226  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1227  for (i = 0; i < (size * size); i++) {
1228  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1229  }
1230  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1231  }
1232  }
1233  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1234  int trafo_size_h = 1 << (log2_trafo_size + 1);
1235  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1236  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1237  if (lc->cu.pred_mode == MODE_INTRA) {
1238  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1239  trafo_size_h, trafo_size_v);
1240  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1241  }
1242  if (cbf_cb[i])
1243  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1244  log2_trafo_size, scan_idx_c, 1);
1245  }
1246  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1247  if (lc->cu.pred_mode == MODE_INTRA) {
1248  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1249  trafo_size_h, trafo_size_v);
1250  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1251  }
1252  if (cbf_cr[i])
1253  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1254  log2_trafo_size, scan_idx_c, 2);
1255  }
1256  }
1257  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1258  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1259  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1260  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1261  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1262  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1263  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1264  if (s->ps.sps->chroma_format_idc == 2) {
1265  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1266  trafo_size_h, trafo_size_v);
1267  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1268  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1269  }
1270  } else if (blk_idx == 3) {
1271  int trafo_size_h = 1 << (log2_trafo_size + 1);
1272  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1273  ff_hevc_set_neighbour_available(s, xBase, yBase,
1274  trafo_size_h, trafo_size_v);
1275  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1276  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1277  if (s->ps.sps->chroma_format_idc == 2) {
1278  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1279  trafo_size_h, trafo_size_v);
1280  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1281  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1282  }
1283  }
1284  }
1285 
1286  return 0;
1287 }
1288 
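/* Mark every min-PU position covered by this lossless (transquant bypass)
 * coding block so the deblocking filter can treat its samples accordingly. */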
1289 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1290 {
1291  int cb_size = 1 << log2_cb_size;
1292  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1293 
1294  int min_pu_width = s->ps.sps->min_pu_width;
1295  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1296  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1297  int i, j;
1298 
1299  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1300  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1301  s->is_pcm[i + j * min_pu_width] = 2;
1302 }
1303 
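/* Recursively parse transform_tree(): decide whether to split further, track
 * the chroma CBFs, and decode each leaf with hls_transform_unit(). */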
1304 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1305  int xBase, int yBase, int cb_xBase, int cb_yBase,
1306  int log2_cb_size, int log2_trafo_size,
1307  int trafo_depth, int blk_idx,
1308  const int *base_cbf_cb, const int *base_cbf_cr)
1309 {
1310  HEVCLocalContext *lc = s->HEVClc;
1311  uint8_t split_transform_flag;
1312  int cbf_cb[2];
1313  int cbf_cr[2];
1314  int ret;
1315 
1316  cbf_cb[0] = base_cbf_cb[0];
1317  cbf_cb[1] = base_cbf_cb[1];
1318  cbf_cr[0] = base_cbf_cr[0];
1319  cbf_cr[1] = base_cbf_cr[1];
1320 
1321  if (lc->cu.intra_split_flag) {
1322  if (trafo_depth == 1) {
1323  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1324  if (s->ps.sps->chroma_format_idc == 3) {
1325  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1326  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1327  } else {
1328  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1329  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1330  }
1331  }
1332  } else {
1333  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1334  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1335  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1336  }
1337 
1338  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1339  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1340  trafo_depth < lc->cu.max_trafo_depth &&
1341  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1342  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1343  } else {
1344  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1345  lc->cu.pred_mode == MODE_INTER &&
1346  lc->cu.part_mode != PART_2Nx2N &&
1347  trafo_depth == 0;
1348 
1349  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1350  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1351  inter_split;
1352  }
1353 
1354  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1355  if (trafo_depth == 0 || cbf_cb[0]) {
1356  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1357  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1358  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1359  }
1360  }
1361 
1362  if (trafo_depth == 0 || cbf_cr[0]) {
1363  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1364  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1365  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1366  }
1367  }
1368  }
1369 
1370  if (split_transform_flag) {
1371  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1372  const int x1 = x0 + trafo_size_split;
1373  const int y1 = y0 + trafo_size_split;
1374 
1375 #define SUBDIVIDE(x, y, idx) \
1376 do { \
1377  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1378  log2_trafo_size - 1, trafo_depth + 1, idx, \
1379  cbf_cb, cbf_cr); \
1380  if (ret < 0) \
1381  return ret; \
1382 } while (0)
1383 
1384  SUBDIVIDE(x0, y0, 0);
1385  SUBDIVIDE(x1, y0, 1);
1386  SUBDIVIDE(x0, y1, 2);
1387  SUBDIVIDE(x1, y1, 3);
1388 
1389 #undef SUBDIVIDE
1390  } else {
1391  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1392  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1393  int min_tu_width = s->ps.sps->min_tb_width;
1394  int cbf_luma = 1;
1395 
1396  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1397  cbf_cb[0] || cbf_cr[0] ||
1398  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1399  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1400  }
1401 
1402  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1403  log2_cb_size, log2_trafo_size,
1404  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1405  if (ret < 0)
1406  return ret;
1407  // TODO: store cbf_luma somewhere else
1408  if (cbf_luma) {
1409  int i, j;
1410  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1411  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1412  int x_tu = (x0 + j) >> log2_min_tu_size;
1413  int y_tu = (y0 + i) >> log2_min_tu_size;
1414  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1415  }
1416  }
1417  if (!s->sh.disable_deblocking_filter_flag) {
1418  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1419  if (s->ps.pps->transquant_bypass_enable_flag &&
1420  lc->cu.cu_transquant_bypass_flag)
1421  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1422  }
1423  }
1424  return 0;
1425 }
1426 
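/* Decode an I_PCM coding block: the raw luma and chroma samples follow in the
 * bitstream and are copied directly into the frame. */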
1427 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1428 {
1429  HEVCLocalContext *lc = s->HEVClc;
1430  GetBitContext gb;
1431  int cb_size = 1 << log2_cb_size;
1432  ptrdiff_t stride0 = s->frame->linesize[0];
1433  ptrdiff_t stride1 = s->frame->linesize[1];
1434  ptrdiff_t stride2 = s->frame->linesize[2];
1435  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1436  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1437  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1438 
1439  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1440  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1441  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1442  s->ps.sps->pcm.bit_depth_chroma;
1443  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1444  int ret;
1445 
1446  if (!s->sh.disable_deblocking_filter_flag)
1447  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1448 
1449  ret = init_get_bits(&gb, pcm, length);
1450  if (ret < 0)
1451  return ret;
1452 
1453  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1454  if (s->ps.sps->chroma_format_idc) {
1455  s->hevcdsp.put_pcm(dst1, stride1,
1456  cb_size >> s->ps.sps->hshift[1],
1457  cb_size >> s->ps.sps->vshift[1],
1458  &gb, s->ps.sps->pcm.bit_depth_chroma);
1459  s->hevcdsp.put_pcm(dst2, stride2,
1460  cb_size >> s->ps.sps->hshift[2],
1461  cb_size >> s->ps.sps->vshift[2],
1462  &gb, s->ps.sps->pcm.bit_depth_chroma);
1463  }
1464 
1465  return 0;
1466 }
1467 
1468 /**
1469  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1470  *
1471  * @param s HEVC decoding context
1472  * @param dst target buffer for block data at block position
1473  * @param dststride stride of the dst buffer
1474  * @param ref reference picture buffer at origin (0, 0)
1475  * @param mv motion vector (relative to block position) to get pixel data from
1476  * @param x_off horizontal position of block from origin (0, 0)
1477  * @param y_off vertical position of block from origin (0, 0)
1478  * @param block_w width of block
1479  * @param block_h height of block
1480  * @param luma_weight weighting factor applied to the luma prediction
1481  * @param luma_offset additive offset applied to the luma prediction value
1482  */
1483 
1484 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1485  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1486  int block_w, int block_h, int luma_weight, int luma_offset)
1487 {
1488  HEVCLocalContext *lc = s->HEVClc;
1489  uint8_t *src = ref->data[0];
1490  ptrdiff_t srcstride = ref->linesize[0];
1491  int pic_width = s->ps.sps->width;
1492  int pic_height = s->ps.sps->height;
1493  int mx = mv->x & 3;
1494  int my = mv->y & 3;
1495  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1496  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1497  int idx = ff_hevc_pel_weight[block_w];
1498 
1499  x_off += mv->x >> 2;
1500  y_off += mv->y >> 2;
1501  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1502 
1503  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1504  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1505  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1506  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1507  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1508  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1509 
1510  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1511  edge_emu_stride, srcstride,
1512  block_w + QPEL_EXTRA,
1513  block_h + QPEL_EXTRA,
1514  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1515  pic_width, pic_height);
1516  src = lc->edge_emu_buffer + buf_offset;
1517  srcstride = edge_emu_stride;
1518  }
1519 
1520  if (!weight_flag)
1521  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1522  block_h, mx, my, block_w);
1523  else
1524  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1525  block_h, s->sh.luma_log2_weight_denom,
1526  luma_weight, luma_offset, mx, my, block_w);
1527 }
1528 
1529 /**
1530  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1531  *
1532  * @param s HEVC decoding context
1533  * @param dst target buffer for block data at block position
1534  * @param dststride stride of the dst buffer
1535  * @param ref0 reference picture0 buffer at origin (0, 0)
1536  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1537  * @param x_off horizontal position of block from origin (0, 0)
1538  * @param y_off vertical position of block from origin (0, 0)
1539  * @param block_w width of block
1540  * @param block_h height of block
1541  * @param ref1 reference picture1 buffer at origin (0, 0)
1542  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1543  * @param current_mv current motion vector structure
1544  */
1545  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1546  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1547  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1548 {
1549  HEVCLocalContext *lc = s->HEVClc;
1550  ptrdiff_t src0stride = ref0->linesize[0];
1551  ptrdiff_t src1stride = ref1->linesize[0];
1552  int pic_width = s->ps.sps->width;
1553  int pic_height = s->ps.sps->height;
1554  int mx0 = mv0->x & 3;
1555  int my0 = mv0->y & 3;
1556  int mx1 = mv1->x & 3;
1557  int my1 = mv1->y & 3;
1558  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1559  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1560  int x_off0 = x_off + (mv0->x >> 2);
1561  int y_off0 = y_off + (mv0->y >> 2);
1562  int x_off1 = x_off + (mv1->x >> 2);
1563  int y_off1 = y_off + (mv1->y >> 2);
1564  int idx = ff_hevc_pel_weight[block_w];
1565 
1566  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1567  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1568 
1569  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1570  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1571  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1572  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1573  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1574  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1575 
1576  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1577  edge_emu_stride, src0stride,
1578  block_w + QPEL_EXTRA,
1579  block_h + QPEL_EXTRA,
1580  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1581  pic_width, pic_height);
1582  src0 = lc->edge_emu_buffer + buf_offset;
1583  src0stride = edge_emu_stride;
1584  }
1585 
1586  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1587  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1588  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1589  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1590  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1591  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1592 
1593  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1594  edge_emu_stride, src1stride,
1595  block_w + QPEL_EXTRA,
1596  block_h + QPEL_EXTRA,
1597  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1598  pic_width, pic_height);
1599  src1 = lc->edge_emu_buffer2 + buf_offset;
1600  src1stride = edge_emu_stride;
1601  }
1602 
1603  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1604  block_h, mx0, my0, block_w);
1605  if (!weight_flag)
1606  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1607  block_h, mx1, my1, block_w);
1608  else
1609  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1610  block_h, s->sh.luma_log2_weight_denom,
1611  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1612  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1613  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1614  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1615  mx1, my1, block_w);
1616 
1617 }
1618 
1619 /**
1620  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1621  *
1622  * @param s HEVC decoding context
1623  * @param dst1 target buffer for block data at block position (U plane)
1624  * @param dst2 target buffer for block data at block position (V plane)
1625  * @param dststride stride of the dst1 and dst2 buffers
1626  * @param ref reference picture buffer at origin (0, 0)
1627  * @param mv motion vector (relative to block position) to get pixel data from
1628  * @param x_off horizontal position of block from origin (0, 0)
1629  * @param y_off vertical position of block from origin (0, 0)
1630  * @param block_w width of block
1631  * @param block_h height of block
1632  * @param chroma_weight weighting factor applied to the chroma prediction
1633  * @param chroma_offset additive offset applied to the chroma prediction value
1634  */
1635 
1636 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1637  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1638  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1639 {
1640  HEVCLocalContext *lc = s->HEVClc;
1641  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1642  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1643  const Mv *mv = &current_mv->mv[reflist];
1644  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1645  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1646  int idx = ff_hevc_pel_weight[block_w];
1647  int hshift = s->ps.sps->hshift[1];
1648  int vshift = s->ps.sps->vshift[1];
1649  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1650  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1651  intptr_t _mx = mx << (1 - hshift);
1652  intptr_t _my = my << (1 - vshift);
1653 
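 /* mx/my keep the fractional part of the quarter-luma-pel MV for the current
  * chroma subsampling (scaled to 1/8 chroma-pel for the EPEL filters); the
  * integer part is folded into x_off/y_off below. */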
1654  x_off += mv->x >> (2 + hshift);
1655  y_off += mv->y >> (2 + vshift);
1656  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1657 
1658  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1659  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1660  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1661  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1662  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1663  int buf_offset0 = EPEL_EXTRA_BEFORE *
1664  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1665  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1666  edge_emu_stride, srcstride,
1667  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1668  x_off - EPEL_EXTRA_BEFORE,
1669  y_off - EPEL_EXTRA_BEFORE,
1670  pic_width, pic_height);
1671 
1672  src0 = lc->edge_emu_buffer + buf_offset0;
1673  srcstride = edge_emu_stride;
1674  }
1675  if (!weight_flag)
1676  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1677  block_h, _mx, _my, block_w);
1678  else
1679  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1680  block_h, s->sh.chroma_log2_weight_denom,
1681  chroma_weight, chroma_offset, _mx, _my, block_w);
1682 }
1683 
1684 /**
1685  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1686  *
1687  * @param s HEVC decoding context
1688  * @param dst0 target buffer for block data at block position
1689  * @param dststride stride of the dst buffer
1690  * @param ref0 reference picture0 buffer at origin (0, 0)
1691  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1692  * @param x_off horizontal position of block from origin (0, 0)
1693  * @param y_off vertical position of block from origin (0, 0)
1694  * @param block_w width of block
1695  * @param block_h height of block
1696  * @param ref1 reference picture1 buffer at origin (0, 0)
1697  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1698  * @param current_mv current motion vector structure
1699  * @param cidx chroma component (0 = Cb, 1 = Cr)
1700  */
1701 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1702  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1703 {
1704  HEVCLocalContext *lc = s->HEVClc;
1705  uint8_t *src1 = ref0->data[cidx+1];
1706  uint8_t *src2 = ref1->data[cidx+1];
1707  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1708  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1709  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1710  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1711  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1712  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1713  Mv *mv0 = &current_mv->mv[0];
1714  Mv *mv1 = &current_mv->mv[1];
1715  int hshift = s->ps.sps->hshift[1];
1716  int vshift = s->ps.sps->vshift[1];
1717 
1718  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1719  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1720  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1721  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1722  intptr_t _mx0 = mx0 << (1 - hshift);
1723  intptr_t _my0 = my0 << (1 - vshift);
1724  intptr_t _mx1 = mx1 << (1 - hshift);
1725  intptr_t _my1 = my1 << (1 - vshift);
1726 
1727  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1728  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1729  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1730  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1731  int idx = ff_hevc_pel_weight[block_w];
1732  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1733  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1734 
1735  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1736  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1737  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1738  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1739  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1740  int buf_offset1 = EPEL_EXTRA_BEFORE *
1741  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1742 
1743  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1744  edge_emu_stride, src1stride,
1745  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1746  x_off0 - EPEL_EXTRA_BEFORE,
1747  y_off0 - EPEL_EXTRA_BEFORE,
1748  pic_width, pic_height);
1749 
1750  src1 = lc->edge_emu_buffer + buf_offset1;
1751  src1stride = edge_emu_stride;
1752  }
1753 
1754  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1755  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1756  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1757  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1758  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1759  int buf_offset1 = EPEL_EXTRA_BEFORE *
1760  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1761 
1762  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1763  edge_emu_stride, src2stride,
1764  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1765  x_off1 - EPEL_EXTRA_BEFORE,
1766  y_off1 - EPEL_EXTRA_BEFORE,
1767  pic_width, pic_height);
1768 
1769  src2 = lc->edge_emu_buffer2 + buf_offset1;
1770  src2stride = edge_emu_stride;
1771  }
1772 
1773  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1774  block_h, _mx0, _my0, block_w);
1775  if (!weight_flag)
1776  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1777  src2, src2stride, lc->tmp,
1778  block_h, _mx1, _my1, block_w);
1779  else
1780  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1781  src2, src2stride, lc->tmp,
1782  block_h,
1783  s->sh.chroma_log2_weight_denom,
1784  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1785  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1786  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1787  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1788  _mx1, _my1, block_w);
1789 }
1790 
1791 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1792                                 const Mv *mv, int y0, int height)
1793 {
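  /* With frame threading, block until the reference frame has decoded (and
   * filtered) every row this prediction may read: MV integer part plus block
   * height plus the interpolation margin. */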
1794  if (s->threads_type == FF_THREAD_FRAME ) {
1795  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1796 
1797  ff_thread_await_progress(&ref->tf, y, 0);
1798  }
1799 }
1800 
1801 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1802  int nPbH, int log2_cb_size, int part_idx,
1803  int merge_idx, MvField *mv)
1804 {
1805  HEVCLocalContext *lc = s->HEVClc;
1806  enum InterPredIdc inter_pred_idc = PRED_L0;
1807  int mvp_flag;
1808 
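  /* AMVP: for each reference list in use, decode a reference index and an MVD,
   * select the motion vector predictor via the mvp flag, and add the MVD to it. */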
1809  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1810  mv->pred_flag = 0;
1811  if (s->sh.slice_type == HEVC_SLICE_B)
1812  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1813 
1814  if (inter_pred_idc != PRED_L1) {
1815  if (s->sh.nb_refs[L0])
1816  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1817 
1818  mv->pred_flag = PF_L0;
1819  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1820  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1821  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1822  part_idx, merge_idx, mv, mvp_flag, 0);
1823  mv->mv[0].x += lc->pu.mvd.x;
1824  mv->mv[0].y += lc->pu.mvd.y;
1825  }
1826 
1827  if (inter_pred_idc != PRED_L0) {
1828  if (s->sh.nb_refs[L1])
1829  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1830 
1831  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1832  AV_ZERO32(&lc->pu.mvd);
1833  } else {
1834  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1835  }
1836 
1837  mv->pred_flag += PF_L1;
1838  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1839  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1840  part_idx, merge_idx, mv, mvp_flag, 1);
1841  mv->mv[1].x += lc->pu.mvd.x;
1842  mv->mv[1].y += lc->pu.mvd.y;
1843  }
1844 }
1845 
1846 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1847  int nPbW, int nPbH,
1848  int log2_cb_size, int partIdx, int idx)
1849 {
1850 #define POS(c_idx, x, y) \
1851  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1852  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1853  HEVCLocalContext *lc = s->HEVClc;
1854  int merge_idx = 0;
1855  struct MvField current_mv = {{{ 0 }}};
1856 
1857  int min_pu_width = s->ps.sps->min_pu_width;
1858 
1859  MvField *tab_mvf = s->ref->tab_mvf;
1860  RefPicList *refPicList = s->ref->refPicList;
1861  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1862  uint8_t *dst0 = POS(0, x0, y0);
1863  uint8_t *dst1 = POS(1, x0, y0);
1864  uint8_t *dst2 = POS(2, x0, y0);
1865  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1866  int min_cb_width = s->ps.sps->min_cb_width;
1867  int x_cb = x0 >> log2_min_cb_size;
1868  int y_cb = y0 >> log2_min_cb_size;
1869  int x_pu, y_pu;
1870  int i, j;
1871 
1872  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1873 
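  /* Motion information comes either from the merge candidate list (skip/merge)
   * or from explicitly coded MVDs added to an AMVP predictor. */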
1874  if (!skip_flag)
1875  lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1876 
1877  if (skip_flag || lc->pu.merge_flag) {
1878  if (s->sh.max_num_merge_cand > 1)
1879  merge_idx = ff_hevc_merge_idx_decode(s);
1880  else
1881  merge_idx = 0;
1882 
1883  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1884  partIdx, merge_idx, &current_mv);
1885  } else {
1886  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1887  partIdx, merge_idx, &current_mv);
1888  }
1889 
1890  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1891  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1892 
1893  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1894  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1895  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1896 
1897  if (current_mv.pred_flag & PF_L0) {
1898  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1899  if (!ref0)
1900  return;
1901  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1902  }
1903  if (current_mv.pred_flag & PF_L1) {
1904  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1905  if (!ref1)
1906  return;
1907  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1908  }
1909 
1910  if (current_mv.pred_flag == PF_L0) {
1911  int x0_c = x0 >> s->ps.sps->hshift[1];
1912  int y0_c = y0 >> s->ps.sps->vshift[1];
1913  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1914  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1915 
1916  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1917  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1918  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1919  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1920 
1921  if (s->ps.sps->chroma_format_idc) {
1922  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1923  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1924  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1925  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1926  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1927  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1928  }
1929  } else if (current_mv.pred_flag == PF_L1) {
1930  int x0_c = x0 >> s->ps.sps->hshift[1];
1931  int y0_c = y0 >> s->ps.sps->vshift[1];
1932  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1933  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1934 
1935  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1936  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1937  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1938  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1939 
1940  if (s->ps.sps->chroma_format_idc) {
1941  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1942  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1943  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1944 
1945  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1946  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1947  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1948  }
1949  } else if (current_mv.pred_flag == PF_BI) {
1950  int x0_c = x0 >> s->ps.sps->hshift[1];
1951  int y0_c = y0 >> s->ps.sps->vshift[1];
1952  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1953  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1954 
1955  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1956  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1957  ref1->frame, &current_mv.mv[1], &current_mv);
1958 
1959  if (s->ps.sps->chroma_format_idc) {
1960  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1961  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1962 
1963  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1964  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1965  }
1966  }
1967 }
1968 
1969 /**
1970  * 8.4.1
1971  */
1972 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1973  int prev_intra_luma_pred_flag)
1974 {
1975  HEVCLocalContext *lc = s->HEVClc;
1976  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1977  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1978  int min_pu_width = s->ps.sps->min_pu_width;
1979  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1980  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1981  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1982 
1983  int cand_up = (lc->ctb_up_flag || y0b) ?
1984  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1985  int cand_left = (lc->ctb_left_flag || x0b) ?
1986  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1987 
1988  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1989 
1990  MvField *tab_mvf = s->ref->tab_mvf;
1991  int intra_pred_mode;
1992  int candidate[3];
1993  int i, j;
1994 
1995  // intra_pred_mode prediction does not cross vertical CTB boundaries
1996  if ((y0 - 1) < y_ctb)
1997  cand_up = INTRA_DC;
1998 
1999  if (cand_left == cand_up) {
2000  if (cand_left < 2) {
2001  candidate[0] = INTRA_PLANAR;
2002  candidate[1] = INTRA_DC;
2003  candidate[2] = INTRA_ANGULAR_26;
2004  } else {
2005  candidate[0] = cand_left;
2006  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2007  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2008  }
2009  } else {
2010  candidate[0] = cand_left;
2011  candidate[1] = cand_up;
2012  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2013  candidate[2] = INTRA_PLANAR;
2014  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2015  candidate[2] = INTRA_DC;
2016  } else {
2017  candidate[2] = INTRA_ANGULAR_26;
2018  }
2019  }
2020 
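  /* Either select one of the three most probable modes via mpm_idx, or sort the
   * candidates and shift rem_intra_luma_pred_mode past each candidate it reaches
   * to recover the full 0..34 intra mode. */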
2021  if (prev_intra_luma_pred_flag) {
2022  intra_pred_mode = candidate[lc->pu.mpm_idx];
2023  } else {
2024  if (candidate[0] > candidate[1])
2025  FFSWAP(uint8_t, candidate[0], candidate[1]);
2026  if (candidate[0] > candidate[2])
2027  FFSWAP(uint8_t, candidate[0], candidate[2]);
2028  if (candidate[1] > candidate[2])
2029  FFSWAP(uint8_t, candidate[1], candidate[2]);
2030 
2031  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2032  for (i = 0; i < 3; i++)
2033  if (intra_pred_mode >= candidate[i])
2034  intra_pred_mode++;
2035  }
2036 
2037  /* write the intra prediction units into the mv array */
2038  if (!size_in_pus)
2039  size_in_pus = 1;
2040  for (i = 0; i < size_in_pus; i++) {
2041  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2042  intra_pred_mode, size_in_pus);
2043 
2044  for (j = 0; j < size_in_pus; j++) {
2045  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2046  }
2047  }
2048 
2049  return intra_pred_mode;
2050 }
2051 
2052 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2053  int log2_cb_size, int ct_depth)
2054 {
2055  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2056  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2057  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2058  int y;
2059 
2060  for (y = 0; y < length; y++)
2061  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2062  ct_depth, length);
2063 }
2064 
2065 static const uint8_t tab_mode_idx[] = {
2066  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2067  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2068 
2069 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2070  int log2_cb_size)
2071 {
2072  HEVCLocalContext *lc = s->HEVClc;
2073  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2074  uint8_t prev_intra_luma_pred_flag[4];
2075  int split = lc->cu.part_mode == PART_NxN;
2076  int pb_size = (1 << log2_cb_size) >> split;
2077  int side = split + 1;
2078  int chroma_mode;
2079  int i, j;
2080 
2081  for (i = 0; i < side; i++)
2082  for (j = 0; j < side; j++)
2083  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2084 
2085  for (i = 0; i < side; i++) {
2086  for (j = 0; j < side; j++) {
2087  if (prev_intra_luma_pred_flag[2 * i + j])
2088  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2089  else
2090  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2091 
2092  lc->pu.intra_pred_mode[2 * i + j] =
2093  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2094  prev_intra_luma_pred_flag[2 * i + j]);
2095  }
2096  }
2097 
2098  if (s->ps.sps->chroma_format_idc == 3) {
2099  for (i = 0; i < side; i++) {
2100  for (j = 0; j < side; j++) {
2101  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2102  if (chroma_mode != 4) {
2103  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2104  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2105  else
2106  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2107  } else {
2108  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2109  }
2110  }
2111  }
2112  } else if (s->ps.sps->chroma_format_idc == 2) {
2113  int mode_idx;
2114  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2115  if (chroma_mode != 4) {
2116  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2117  mode_idx = 34;
2118  else
2119  mode_idx = intra_chroma_table[chroma_mode];
2120  } else {
2121  mode_idx = lc->pu.intra_pred_mode[0];
2122  }
2123  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2124  } else if (s->ps.sps->chroma_format_idc != 0) {
2125  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2126  if (chroma_mode != 4) {
2127  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2128  lc->pu.intra_pred_mode_c[0] = 34;
2129  else
2130  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2131  } else {
2132  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2133  }
2134  }
2135 }
2136 
2137 static void intra_prediction_unit_default_value(HEVCContext *s,
2138  int x0, int y0,
2139  int log2_cb_size)
2140 {
2141  HEVCLocalContext *lc = s->HEVClc;
2142  int pb_size = 1 << log2_cb_size;
2143  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2144  int min_pu_width = s->ps.sps->min_pu_width;
2145  MvField *tab_mvf = s->ref->tab_mvf;
2146  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2147  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2148  int j, k;
2149 
2150  if (size_in_pus == 0)
2151  size_in_pus = 1;
2152  for (j = 0; j < size_in_pus; j++)
2153  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2154  if (lc->cu.pred_mode == MODE_INTRA)
2155  for (j = 0; j < size_in_pus; j++)
2156  for (k = 0; k < size_in_pus; k++)
2157  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2158 }
2159 
2160 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2161 {
2162  int cb_size = 1 << log2_cb_size;
2163  HEVCLocalContext *lc = s->HEVClc;
2164  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2165  int length = cb_size >> log2_min_cb_size;
2166  int min_cb_width = s->ps.sps->min_cb_width;
2167  int x_cb = x0 >> log2_min_cb_size;
2168  int y_cb = y0 >> log2_min_cb_size;
2169  int idx = log2_cb_size - 2;
2170  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2171  int x, y, ret;
2172 
2173  lc->cu.x = x0;
2174  lc->cu.y = y0;
2175  lc->cu.pred_mode = MODE_INTRA;
2176  lc->cu.part_mode = PART_2Nx2N;
2177  lc->cu.intra_split_flag = 0;
2178 
2179  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2180  for (x = 0; x < 4; x++)
2181  lc->pu.intra_pred_mode[x] = 1;
2182  if (s->ps.pps->transquant_bypass_enable_flag) {
2183  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2184  if (lc->cu.cu_transquant_bypass_flag)
2185  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2186  } else
2187  lc->cu.cu_transquant_bypass_flag = 0;
2188 
2189  if (s->sh.slice_type != HEVC_SLICE_I) {
2190  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2191 
2192  x = y_cb * min_cb_width + x_cb;
2193  for (y = 0; y < length; y++) {
2194  memset(&s->skip_flag[x], skip_flag, length);
2195  x += min_cb_width;
2196  }
2197  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2198  } else {
2199  x = y_cb * min_cb_width + x_cb;
2200  for (y = 0; y < length; y++) {
2201  memset(&s->skip_flag[x], 0, length);
2202  x += min_cb_width;
2203  }
2204  }
2205 
2206  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2207  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2208  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2209 
2210  if (!s->sh.disable_deblocking_filter_flag)
2211  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2212  } else {
2213  int pcm_flag = 0;
2214 
2215  if (s->sh.slice_type != HEVC_SLICE_I)
2216  lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2217  if (lc->cu.pred_mode != MODE_INTRA ||
2218  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2219  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2220  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2221  lc->cu.pred_mode == MODE_INTRA;
2222  }
2223 
2224  if (lc->cu.pred_mode == MODE_INTRA) {
2225  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2226  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2227  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2228  pcm_flag = ff_hevc_pcm_flag_decode(s);
2229  }
2230  if (pcm_flag) {
2231  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2232  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2233  if (s->ps.sps->pcm.loop_filter_disable_flag)
2234  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2235 
2236  if (ret < 0)
2237  return ret;
2238  } else {
2239  intra_prediction_unit(s, x0, y0, log2_cb_size);
2240  }
2241  } else {
2242  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2243  switch (lc->cu.part_mode) {
2244  case PART_2Nx2N:
2245  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2246  break;
2247  case PART_2NxN:
2248  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2249  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2250  break;
2251  case PART_Nx2N:
2252  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2253  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2254  break;
2255  case PART_2NxnU:
2256  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2257  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2258  break;
2259  case PART_2NxnD:
2260  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2261  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2262  break;
2263  case PART_nLx2N:
2264  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2265  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2266  break;
2267  case PART_nRx2N:
2268  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2269  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2270  break;
2271  case PART_NxN:
2272  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2273  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2274  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2275  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2276  break;
2277  }
2278  }
2279 
2280  if (!pcm_flag) {
2281  int rqt_root_cbf = 1;
2282 
2283  if (lc->cu.pred_mode != MODE_INTRA &&
2284  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2285  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2286  }
2287  if (rqt_root_cbf) {
2288  const static int cbf[2] = { 0 };
2289  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2290  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2291  s->ps.sps->max_transform_hierarchy_depth_inter;
2292  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2293  log2_cb_size,
2294  log2_cb_size, 0, 0, cbf, cbf);
2295  if (ret < 0)
2296  return ret;
2297  } else {
2298  if (!s->sh.disable_deblocking_filter_flag)
2299  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2300  }
2301  }
2302  }
2303 
2304  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2305  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2306 
2307  x = y_cb * min_cb_width + x_cb;
2308  for (y = 0; y < length; y++) {
2309  memset(&s->qp_y_tab[x], lc->qp_y, length);
2310  x += min_cb_width;
2311  }
2312 
2313  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2314  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2315  lc->qPy_pred = lc->qp_y;
2316  }
2317 
2318  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2319 
2320  return 0;
2321 }
2322 
2323 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2324  int log2_cb_size, int cb_depth)
2325 {
2326  HEVCLocalContext *lc = s->HEVClc;
2327  const int cb_size = 1 << log2_cb_size;
2328  int ret;
2329  int split_cu;
2330 
2331  lc->ct_depth = cb_depth;
2332  if (x0 + cb_size <= s->ps.sps->width &&
2333  y0 + cb_size <= s->ps.sps->height &&
2334  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2335  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2336  } else {
2337  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2338  }
2339  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2340  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2341  lc->tu.is_cu_qp_delta_coded = 0;
2342  lc->tu.cu_qp_delta = 0;
2343  }
2344 
2345  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2346  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2347  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2348  }
2349 
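  /* If the CU is split, recurse into the four half-size sub-blocks, skipping the
   * ones that lie entirely outside the picture; otherwise decode one coding unit
   * at this position. The return value signals whether more CTU data follows. */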
2350  if (split_cu) {
2351  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2352  const int cb_size_split = cb_size >> 1;
2353  const int x1 = x0 + cb_size_split;
2354  const int y1 = y0 + cb_size_split;
2355 
2356  int more_data = 0;
2357 
2358  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2359  if (more_data < 0)
2360  return more_data;
2361 
2362  if (more_data && x1 < s->ps.sps->width) {
2363  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2364  if (more_data < 0)
2365  return more_data;
2366  }
2367  if (more_data && y1 < s->ps.sps->height) {
2368  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2369  if (more_data < 0)
2370  return more_data;
2371  }
2372  if (more_data && x1 < s->ps.sps->width &&
2373  y1 < s->ps.sps->height) {
2374  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2375  if (more_data < 0)
2376  return more_data;
2377  }
2378 
2379  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2380  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2381  lc->qPy_pred = lc->qp_y;
2382 
2383  if (more_data)
2384  return ((x1 + cb_size_split) < s->ps.sps->width ||
2385  (y1 + cb_size_split) < s->ps.sps->height);
2386  else
2387  return 0;
2388  } else {
2389  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2390  if (ret < 0)
2391  return ret;
2392  if ((!((x0 + cb_size) %
2393  (1 << (s->ps.sps->log2_ctb_size))) ||
2394  (x0 + cb_size >= s->ps.sps->width)) &&
2395  (!((y0 + cb_size) %
2396  (1 << (s->ps.sps->log2_ctb_size))) ||
2397  (y0 + cb_size >= s->ps.sps->height))) {
2398  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2399  return !end_of_slice_flag;
2400  } else {
2401  return 1;
2402  }
2403  }
2404 
2405  return 0;
2406 }
2407 
2408 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2409  int ctb_addr_ts)
2410 {
2411  HEVCLocalContext *lc = s->HEVClc;
2412  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2413  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2414  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2415 
2416  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2417 
2418  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2419  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2420  lc->first_qp_group = 1;
2421  lc->end_of_tiles_x = s->ps.sps->width;
2422  } else if (s->ps.pps->tiles_enabled_flag) {
2423  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2424  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2425  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2426  lc->first_qp_group = 1;
2427  }
2428  } else {
2429  lc->end_of_tiles_x = s->ps.sps->width;
2430  }
2431 
2432  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2433 
2434  lc->boundary_flags = 0;
2435  if (s->ps.pps->tiles_enabled_flag) {
2436  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2437  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2438  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2439  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2440  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2441  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2442  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2443  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2444  } else {
2445  if (ctb_addr_in_slice <= 0)
2446  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2447  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2448  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2449  }
2450 
2451  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2452  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2453  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2454  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2455 }
2456 
2457 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2458 {
2459  HEVCContext *s = avctxt->priv_data;
2460  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2461  int more_data = 1;
2462  int x_ctb = 0;
2463  int y_ctb = 0;
2464  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2465  int ret;
2466 
2467  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2468  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2469  return AVERROR_INVALIDDATA;
2470  }
2471 
2472  if (s->sh.dependent_slice_segment_flag) {
2473  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2474  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2475  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2476  return AVERROR_INVALIDDATA;
2477  }
2478  }
2479 
2480  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2481  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2482 
2483  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2484  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2485  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2486 
2487  ret = ff_hevc_cabac_init(s, ctb_addr_ts, 0);
2488  if (ret < 0) {
2489  s->tab_slice_address[ctb_addr_rs] = -1;
2490  return ret;
2491  }
2492 
2493  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2494 
2495  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2496  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2497  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2498 
2499  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2500  if (more_data < 0) {
2501  s->tab_slice_address[ctb_addr_rs] = -1;
2502  return more_data;
2503  }
2504 
2505 
2506  ctb_addr_ts++;
2507  ff_hevc_save_states(s, ctb_addr_ts);
2508  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2509  }
2510 
2511  if (x_ctb + ctb_size >= s->ps.sps->width &&
2512  y_ctb + ctb_size >= s->ps.sps->height)
2513  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2514 
2515  return ctb_addr_ts;
2516 }
2517 
2518 static int hls_slice_data(HEVCContext *s)
2519 {
2520  int arg[2];
2521  int ret[2];
2522 
2523  arg[0] = 0;
2524  arg[1] = 1;
2525 
2526  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2527  return ret[0];
2528 }
2529 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2530 {
2531  HEVCContext *s1 = avctxt->priv_data, *s;
2532  HEVCLocalContext *lc;
2533  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2534  int more_data = 1;
2535  int *ctb_row_p = input_ctb_row;
2536  int ctb_row = ctb_row_p[job];
2537  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2538  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2539  int thread = ctb_row % s1->threads_number;
2540  int ret;
2541 
2542  s = s1->sList[self_id];
2543  lc = s->HEVClc;
2544 
2545  if(ctb_row) {
2546  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2547  if (ret < 0)
2548  goto error;
2549  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2550  }
2551 
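  /* Wavefront parallel processing: each job decodes one CTU row. Before every CTU
   * the row above must have progressed far enough that its saved CABAC state and
   * reconstructed neighbours are available; a failing row sets wpp_err so the
   * remaining rows bail out early. */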
2552  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2553  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2554  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2555 
2556  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2557 
2558  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2559 
2560  if (atomic_load(&s1->wpp_err)) {
2561  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2562  return 0;
2563  }
2564 
2565  ret = ff_hevc_cabac_init(s, ctb_addr_ts, thread);
2566  if (ret < 0)
2567  goto error;
2568  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2569  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2570 
2571  if (more_data < 0) {
2572  ret = more_data;
2573  goto error;
2574  }
2575 
2576  ctb_addr_ts++;
2577 
2578  ff_hevc_save_states(s, ctb_addr_ts);
2579  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2580  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2581 
2582  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2583  atomic_store(&s1->wpp_err, 1);
2584  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2585  return 0;
2586  }
2587 
2588  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2589  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2590  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2591  return ctb_addr_ts;
2592  }
2593  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2594  x_ctb+=ctb_size;
2595 
2596  if(x_ctb >= s->ps.sps->width) {
2597  break;
2598  }
2599  }
2600  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2601 
2602  return 0;
2603 error:
2604  s->tab_slice_address[ctb_addr_rs] = -1;
2605  atomic_store(&s1->wpp_err, 1);
2606  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2607  return ret;
2608 }
2609 
2610 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2611 {
2612  const uint8_t *data = nal->data;
2613  int length = nal->size;
2614  HEVCLocalContext *lc = s->HEVClc;
2615  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2616  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2617  int64_t offset;
2618  int64_t startheader, cmpt = 0;
2619  int i, j, res = 0;
2620 
2621  if (!ret || !arg) {
2622  av_free(ret);
2623  av_free(arg);
2624  return AVERROR(ENOMEM);
2625  }
2626 
2627  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2628  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2629  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2630  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2631  );
2632  res = AVERROR_INVALIDDATA;
2633  goto error;
2634  }
2635 
2636  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2637 
2638  for (i = 1; i < s->threads_number; i++) {
2639  if (s->sList[i] && s->HEVClcList[i])
2640  continue;
2641  av_freep(&s->sList[i]);
2642  av_freep(&s->HEVClcList[i]);
2643  s->sList[i] = av_malloc(sizeof(HEVCContext));
2644  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2645  if (!s->sList[i] || !s->HEVClcList[i]) {
2646  res = AVERROR(ENOMEM);
2647  goto error;
2648  }
2649  memcpy(s->sList[i], s, sizeof(HEVCContext));
2650  s->sList[i]->HEVClc = s->HEVClcList[i];
2651  }
2652 
2653  offset = (lc->gb.index >> 3);
2654 
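  /* entry_point_offset[] counts bytes of the NAL unit before unescaping, so the
   * emulation-prevention bytes recorded in skipped_bytes_pos[] are subtracted to
   * obtain per-substream offsets and sizes within the unescaped buffer. */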
2655  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2656  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2657  startheader--;
2658  cmpt++;
2659  }
2660  }
2661 
2662  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2663  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2664  for (j = 0, cmpt = 0, startheader = offset
2665  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2666  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2667  startheader--;
2668  cmpt++;
2669  }
2670  }
2671  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2672  s->sh.offset[i - 1] = offset;
2673 
2674  }
2675  if (s->sh.num_entry_point_offsets != 0) {
2676  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2677  if (length < offset) {
2678  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2679  res = AVERROR_INVALIDDATA;
2680  goto error;
2681  }
2682  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2683  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2684 
2685  }
2686  s->data = data;
2687 
2688  for (i = 1; i < s->threads_number; i++) {
2689  s->sList[i]->HEVClc->first_qp_group = 1;
2690  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2691  memcpy(s->sList[i], s, sizeof(HEVCContext));
2692  s->sList[i]->HEVClc = s->HEVClcList[i];
2693  }
2694 
2695  atomic_store(&s->wpp_err, 0);
2696  ff_reset_entries(s->avctx);
2697 
2698  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2699  arg[i] = i;
2700  ret[i] = 0;
2701  }
2702 
2703  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2704  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2705 
2706  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2707  res += ret[i];
2708 error:
2709  av_free(ret);
2710  av_free(arg);
2711  return res;
2712 }
2713 
2714 static int set_side_data(HEVCContext *s)
2715 {
2716  AVFrame *out = s->ref->frame;
2717 
2718  if (s->sei.frame_packing.present &&
2719  s->sei.frame_packing.arrangement_type >= 3 &&
2720  s->sei.frame_packing.arrangement_type <= 5 &&
2721  s->sei.frame_packing.content_interpretation_type > 0 &&
2722  s->sei.frame_packing.content_interpretation_type < 3) {
2723  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2724  if (!stereo)
2725  return AVERROR(ENOMEM);
2726 
2727  switch (s->sei.frame_packing.arrangement_type) {
2728  case 3:
2729  if (s->sei.frame_packing.quincunx_subsampling)
2730  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2731  else
2732  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2733  break;
2734  case 4:
2735  stereo->type = AV_STEREO3D_TOPBOTTOM;
2736  break;
2737  case 5:
2738  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2739  break;
2740  }
2741 
2742  if (s->sei.frame_packing.content_interpretation_type == 2)
2743  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2744 
2745  if (s->sei.frame_packing.arrangement_type == 5) {
2746  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2747  stereo->view = AV_STEREO3D_VIEW_LEFT;
2748  else
2749  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2750  }
2751  }
2752 
2753  if (s->sei.display_orientation.present &&
2754  (s->sei.display_orientation.anticlockwise_rotation ||
2755  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2756  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2757  AVFrameSideData *rotation = av_frame_new_side_data(out,
2758  AV_FRAME_DATA_DISPLAYMATRIX,
2759  sizeof(int32_t) * 9);
2760  if (!rotation)
2761  return AVERROR(ENOMEM);
2762 
2763  av_display_rotation_set((int32_t *)rotation->data, angle);
2764  av_display_matrix_flip((int32_t *)rotation->data,
2765  s->sei.display_orientation.hflip,
2766  s->sei.display_orientation.vflip);
2767  }
2768 
2769  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2770  // so the side data persists for the entire coded video sequence.
2771  if (s->sei.mastering_display.present > 0 &&
2772  IS_IRAP(s) && s->no_rasl_output_flag) {
2773  s->sei.mastering_display.present--;
2774  }
2775  if (s->sei.mastering_display.present) {
2776  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2777  const int mapping[3] = {2, 0, 1};
2778  const int chroma_den = 50000;
2779  const int luma_den = 10000;
2780  int i;
2781  AVMasteringDisplayMetadata *metadata =
2782  av_mastering_display_metadata_create_side_data(out);
2783  if (!metadata)
2784  return AVERROR(ENOMEM);
2785 
2786  for (i = 0; i < 3; i++) {
2787  const int j = mapping[i];
2788  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2789  metadata->display_primaries[i][0].den = chroma_den;
2790  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2791  metadata->display_primaries[i][1].den = chroma_den;
2792  }
2793  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2794  metadata->white_point[0].den = chroma_den;
2795  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2796  metadata->white_point[1].den = chroma_den;
2797 
2798  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2799  metadata->max_luminance.den = luma_den;
2800  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2801  metadata->min_luminance.den = luma_den;
2802  metadata->has_luminance = 1;
2803  metadata->has_primaries = 1;
2804 
2805  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2806  av_log(s->avctx, AV_LOG_DEBUG,
2807  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2808  av_q2d(metadata->display_primaries[0][0]),
2809  av_q2d(metadata->display_primaries[0][1]),
2810  av_q2d(metadata->display_primaries[1][0]),
2811  av_q2d(metadata->display_primaries[1][1]),
2812  av_q2d(metadata->display_primaries[2][0]),
2813  av_q2d(metadata->display_primaries[2][1]),
2814  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2815  av_log(s->avctx, AV_LOG_DEBUG,
2816  "min_luminance=%f, max_luminance=%f\n",
2817  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2818  }
2819  // Decrement the content light level flag when IRAP frame has no_rasl_output_flag=1
2820  // so the side data persists for the entire coded video sequence.
2821  if (s->sei.content_light.present > 0 &&
2822  IS_IRAP(s) && s->no_rasl_output_flag) {
2823  s->sei.content_light.present--;
2824  }
2825  if (s->sei.content_light.present) {
2826  AVContentLightMetadata *metadata =
2827  av_content_light_metadata_create_side_data(out);
2828  if (!metadata)
2829  return AVERROR(ENOMEM);
2830  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2831  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2832 
2833  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2834  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2835  metadata->MaxCLL, metadata->MaxFALL);
2836  }
2837 
2838  if (s->sei.a53_caption.buf_ref) {
2839  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2840 
2841  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2842  if (!sd)
2843  av_buffer_unref(&a53->buf_ref);
2844  a53->buf_ref = NULL;
2845  }
2846 
2847  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2848  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2849 
2850  if (unreg->buf_ref[i]) {
2851  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
2852  AV_FRAME_DATA_SEI_UNREGISTERED,
2853  unreg->buf_ref[i]);
2854  if (!sd)
2855  av_buffer_unref(&unreg->buf_ref[i]);
2856  unreg->buf_ref[i] = NULL;
2857  }
2858  }
2859  s->sei.unregistered.nb_buf_ref = 0;
2860 
2861  if (s->sei.timecode.present) {
2862  uint32_t *tc_sd;
2863  char tcbuf[AV_TIMECODE_STR_SIZE];
2864  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2865  sizeof(uint32_t) * 4);
2866  if (!tcside)
2867  return AVERROR(ENOMEM);
2868 
2869  tc_sd = (uint32_t*)tcside->data;
2870  tc_sd[0] = s->sei.timecode.num_clock_ts;
2871 
2872  for (int i = 0; i < tc_sd[0]; i++) {
2873  int drop = s->sei.timecode.cnt_dropped_flag[i];
2874  int hh = s->sei.timecode.hours_value[i];
2875  int mm = s->sei.timecode.minutes_value[i];
2876  int ss = s->sei.timecode.seconds_value[i];
2877  int ff = s->sei.timecode.n_frames[i];
2878 
2879  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2880  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2881  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2882  }
2883 
2884  s->sei.timecode.num_clock_ts = 0;
2885  }
2886 
2887  if (s->sei.film_grain_characteristics.present) {
2888  HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
2889  AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
2890  if (!fgp)
2891  return AVERROR(ENOMEM);
2892 
2893  fgp->type = AV_FILM_GRAIN_PARAMS_H274;
2894  fgp->seed = s->ref->poc; /* no poc_offset in HEVC */
2895 
2896  fgp->codec.h274.model_id = fgc->model_id;
2897  if (fgc->separate_colour_description_present_flag) {
2898  fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
2899  fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
2900  fgp->codec.h274.color_range = fgc->full_range + 1;
2901  fgp->codec.h274.color_primaries = fgc->color_primaries;
2902  fgp->codec.h274.color_trc = fgc->transfer_characteristics;
2903  fgp->codec.h274.color_space = fgc->matrix_coeffs;
2904  } else {
2905  const HEVCSPS *sps = s->ps.sps;
2906  const VUI *vui = &sps->vui;
2907  fgp->codec.h274.bit_depth_luma = sps->bit_depth;
2908  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
2909  if (vui->video_signal_type_present_flag)
2910  fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
2911  else
2912  fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
2913  if (vui->colour_description_present_flag) {
2914  fgp->codec.h274.color_primaries = vui->colour_primaries;
2915  fgp->codec.h274.color_trc = vui->transfer_characteristic;
2916  fgp->codec.h274.color_space = vui->matrix_coeffs;
2917  } else {
2918  fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
2919  fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
2920  fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
2921  }
2922  }
2923  fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
2924  fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
2925 
2926  memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
2927  sizeof(fgp->codec.h274.component_model_present));
2928  memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
2929  sizeof(fgp->codec.h274.num_intensity_intervals));
2930  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
2931  sizeof(fgp->codec.h274.num_model_values));
2932  memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
2933  sizeof(fgp->codec.h274.intensity_interval_lower_bound));
2934  memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
2935  sizeof(fgp->codec.h274.intensity_interval_upper_bound));
2936  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
2937  sizeof(fgp->codec.h274.comp_model_value));
2938 
2939  fgc->present = fgc->persistence_flag;
2940  }
2941 
2942  if (s->sei.dynamic_hdr_plus.info) {
2943  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2944  if (!info_ref)
2945  return AVERROR(ENOMEM);
2946 
2947  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2948  av_buffer_unref(&info_ref);
2949  return AVERROR(ENOMEM);
2950  }
2951  }
2952 
2953  return 0;
2954 }
2955 
2956 static int hevc_frame_start(HEVCContext *s)
2957 {
2958  HEVCLocalContext *lc = s->HEVClc;
2959  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2960  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2961  int ret;
2962 
2963  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2964  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2965  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2966  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2967  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2968 
2969  s->is_decoded = 0;
2970  s->first_nal_type = s->nal_unit_type;
2971 
2972  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2973 
2974  if (s->ps.pps->tiles_enabled_flag)
2975  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2976 
2977  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2978  if (ret < 0)
2979  goto fail;
2980 
2981  ret = ff_hevc_frame_rps(s);
2982  if (ret < 0) {
2983  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2984  goto fail;
2985  }
2986 
2987  s->ref->frame->key_frame = IS_IRAP(s);
2988 
2989  s->ref->needs_fg = s->sei.film_grain_characteristics.present &&
2990  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
2991  !s->avctx->hwaccel;
2992 
2993  if (s->ref->needs_fg) {
2994  s->ref->frame_grain->format = s->ref->frame->format;
2995  s->ref->frame_grain->width = s->ref->frame->width;
2996  s->ref->frame_grain->height = s->ref->frame->height;
2997  if ((ret = ff_thread_get_buffer(s->avctx, &s->ref->tf_grain, 0)) < 0)
2998  goto fail;
2999  }
3000 
3001  ret = set_side_data(s);
3002  if (ret < 0)
3003  goto fail;
3004 
3005  s->frame->pict_type = 3 - s->sh.slice_type;
3006 
3007  if (!IS_IRAP(s))
3008  ff_hevc_bump_frame(s);
3009 
3010  av_frame_unref(s->output_frame);
3011  ret = ff_hevc_output_frame(s, s->output_frame, 0);
3012  if (ret < 0)
3013  goto fail;
3014 
3015  if (!s->avctx->hwaccel)
3016  ff_thread_finish_setup(s->avctx);
3017 
3018  return 0;
3019 
3020 fail:
3021  if (s->ref)
3022  ff_hevc_unref_frame(s, s->ref, ~0);
3023  s->ref = NULL;
3024  return ret;
3025 }
3026 
3027 static int hevc_frame_end(HEVCContext *s)
3028 {
3029  HEVCFrame *out = s->ref;
3030  const AVFrameSideData *sd;
3031  int ret;
3032 
3033  if (out->needs_fg) {
3034  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
3035  av_assert0(out->frame_grain->buf[0] && sd);
3036  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
3037  (AVFilmGrainParams *) sd->data);
3038 
3039  if (ret < 0) {
3040  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
3041  "grain, ignoring: %s\n", av_err2str(ret));
3042  out->needs_fg = 0;
3043  }
3044  }
3045 
3046  return 0;
3047 }
3048 
3049 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
3050 {
3051  HEVCLocalContext *lc = s->HEVClc;
3052  GetBitContext *gb = &lc->gb;
3053  int ctb_addr_ts, ret;
3054 
3055  *gb = nal->gb;
3056  s->nal_unit_type = nal->type;
3057  s->temporal_id = nal->temporal_id;
3058 
3059  switch (s->nal_unit_type) {
3060  case HEVC_NAL_VPS:
3061  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3062  ret = s->avctx->hwaccel->decode_params(s->avctx,
3063  nal->type,
3064  nal->raw_data,
3065  nal->raw_size);
3066  if (ret < 0)
3067  goto fail;
3068  }
3069  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
3070  if (ret < 0)
3071  goto fail;
3072  break;
3073  case HEVC_NAL_SPS:
3074  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3075  ret = s->avctx->hwaccel->decode_params(s->avctx,
3076  nal->type,
3077  nal->raw_data,
3078  nal->raw_size);
3079  if (ret < 0)
3080  goto fail;
3081  }
3082  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3083  s->apply_defdispwin);
3084  if (ret < 0)
3085  goto fail;
3086  break;
3087  case HEVC_NAL_PPS:
3088  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3089  ret = s->avctx->hwaccel->decode_params(s->avctx,
3090  nal->type,
3091  nal->raw_data,
3092  nal->raw_size);
3093  if (ret < 0)
3094  goto fail;
3095  }
3096  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3097  if (ret < 0)
3098  goto fail;
3099  break;
3100  case HEVC_NAL_SEI_PREFIX:
3101  case HEVC_NAL_SEI_SUFFIX:
3102  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3103  ret = s->avctx->hwaccel->decode_params(s->avctx,
3104  nal->type,
3105  nal->raw_data,
3106  nal->raw_size);
3107  if (ret < 0)
3108  goto fail;
3109  }
3110  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3111  if (ret < 0)
3112  goto fail;
3113  break;
3114  case HEVC_NAL_TRAIL_R:
3115  case HEVC_NAL_TRAIL_N:
3116  case HEVC_NAL_TSA_N:
3117  case HEVC_NAL_TSA_R:
3118  case HEVC_NAL_STSA_N:
3119  case HEVC_NAL_STSA_R:
3120  case HEVC_NAL_BLA_W_LP:
3121  case HEVC_NAL_BLA_W_RADL:
3122  case HEVC_NAL_BLA_N_LP:
3123  case HEVC_NAL_IDR_W_RADL:
3124  case HEVC_NAL_IDR_N_LP:
3125  case HEVC_NAL_CRA_NUT:
3126  case HEVC_NAL_RADL_N:
3127  case HEVC_NAL_RADL_R:
3128  case HEVC_NAL_RASL_N:
3129  case HEVC_NAL_RASL_R:
3130  ret = hls_slice_header(s);
3131  if (ret < 0)
3132  return ret;
3133  if (ret == 1) {
3134  ret = AVERROR_INVALIDDATA;
3135  goto fail;
3136  }
3137 
3138 
3139  if (
3140  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3141  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3142  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3143  break;
3144  }
3145 
3146  if (s->sh.first_slice_in_pic_flag) {
3147  if (s->max_ra == INT_MAX) {
3148  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3149  s->max_ra = s->poc;
3150  } else {
3151  if (IS_IDR(s))
3152  s->max_ra = INT_MIN;
3153  }
3154  }
3155 
3156  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3157  s->poc <= s->max_ra) {
3158  s->is_decoded = 0;
3159  break;
3160  } else {
3161  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3162  s->max_ra = INT_MIN;
3163  }
3164 
3165  s->overlap ++;
3166  ret = hevc_frame_start(s);
3167  if (ret < 0)
3168  return ret;
3169  } else if (!s->ref) {
3170  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3171  goto fail;
3172  }
3173 
3174  if (s->nal_unit_type != s->first_nal_type) {
3175  av_log(s->avctx, AV_LOG_ERROR,
3176  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3177  s->first_nal_type, s->nal_unit_type);
3178  return AVERROR_INVALIDDATA;
3179  }
3180 
3181  if (!s->sh.dependent_slice_segment_flag &&
3182  s->sh.slice_type != HEVC_SLICE_I) {
3183  ret = ff_hevc_slice_rpl(s);
3184  if (ret < 0) {
3185  av_log(s->avctx, AV_LOG_WARNING,
3186  "Error constructing the reference lists for the current slice.\n");
3187  goto fail;
3188  }
3189  }
3190 
3191  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3192  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3193  if (ret < 0)
3194  goto fail;
3195  }
3196 
3197  if (s->avctx->hwaccel) {
3198  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3199  if (ret < 0)
3200  goto fail;
3201  } else {
3202  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3203  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3204  else
3205  ctb_addr_ts = hls_slice_data(s);
3206  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3207  ret = hevc_frame_end(s);
3208  if (ret < 0)
3209  goto fail;
3210  s->is_decoded = 1;
3211  }
3212 
3213  if (ctb_addr_ts < 0) {
3214  ret = ctb_addr_ts;
3215  goto fail;
3216  }
3217  }
3218  break;
3219  case HEVC_NAL_EOS_NUT:
3220  case HEVC_NAL_EOB_NUT:
3221  s->seq_decode = (s->seq_decode + 1) & 0xff;
3222  s->max_ra = INT_MAX;
3223  break;
3224  case HEVC_NAL_AUD:
3225  case HEVC_NAL_FD_NUT:
3226  break;
3227  default:
3228  av_log(s->avctx, AV_LOG_INFO,
3229  "Skipping NAL unit %d\n", s->nal_unit_type);
3230  }
3231 
3232  return 0;
3233 fail:
3234  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3235  return ret;
3236  return 0;
3237 }
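
The fail label above only propagates an error to the caller when AV_EF_EXPLODE is set in err_recognition; otherwise decode_nal_unit() reports success and decoding continues with the next NAL unit. A minimal sketch (not part of hevcdec.c, helper name made up) of how an application opts into that strict behaviour, together with the SEI checksum verification used further below:

#include <libavcodec/avcodec.h>

/* Hypothetical helper: open an HEVC decoder that aborts on NAL-level errors
 * (AV_EF_EXPLODE) and verifies SEI picture-hash checksums (AV_EF_CRCCHECK). */
static AVCodecContext *open_strict_hevc_decoder(void)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;

    if (!avctx)
        return NULL;

    avctx->err_recognition = AV_EF_EXPLODE | AV_EF_CRCCHECK;

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}
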
3238 
3239 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3240 {
3241  int i, ret = 0;
3242  int eos_at_start = 1;
3243 
3244  s->ref = NULL;
3245  s->last_eos = s->eos;
3246  s->eos = 0;
3247  s->overlap = 0;
3248 
3249  /* split the input packet into NAL units, so we know the upper bound on the
3250  * number of slices in the frame */
3251  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3252  s->nal_length_size, s->avctx->codec_id, 1, 0);
3253  if (ret < 0) {
3254  av_log(s->avctx, AV_LOG_ERROR,
3255  "Error splitting the input into NAL units.\n");
3256  return ret;
3257  }
3258 
3259  for (i = 0; i < s->pkt.nb_nals; i++) {
3260  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3261  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3262  if (eos_at_start) {
3263  s->last_eos = 1;
3264  } else {
3265  s->eos = 1;
3266  }
3267  } else {
3268  eos_at_start = 0;
3269  }
3270  }
3271 
3272  /* decode the NAL units */
3273  for (i = 0; i < s->pkt.nb_nals; i++) {
3274  H2645NAL *nal = &s->pkt.nals[i];
3275 
3276  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3277  (s->avctx->skip_frame >= AVDISCARD_NONREF
3278  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3279  continue;
3280 
3281  ret = decode_nal_unit(s, nal);
3282  if (ret >= 0 && s->overlap > 2)
3283  ret = AVERROR_INVALIDDATA;
3284  if (ret < 0) {
3285  av_log(s->avctx, AV_LOG_WARNING,
3286  "Error parsing NAL unit #%d.\n", i);
3287  goto fail;
3288  }
3289  }
3290 
3291 fail:
3292  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3293  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3294 
3295  return ret;
3296 }
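
The per-NAL loop above honours avctx->skip_frame before any parsing, so whole NAL units can be dropped cheaply. A minimal sketch (assumption, not part of this file) of how a caller requests that:

#include <libavcodec/avcodec.h>

/* Ask the decoder to skip NAL units of non-reference pictures; AVDISCARD_BIDIR
 * or AVDISCARD_NONKEY map to the slice-type checks in decode_nal_unit(). */
static void drop_nonref_pictures(AVCodecContext *avctx)
{
    avctx->skip_frame = AVDISCARD_NONREF;
}
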
3297 
3298 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3299 {
3300  int i;
3301  for (i = 0; i < 16; i++)
3302  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3303 }
3304 
3305 static int verify_md5(HEVCContext *s, AVFrame *frame)
3306 {
3307  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3308  int pixel_shift;
3309  int i, j;
3310 
3311  if (!desc)
3312  return AVERROR(EINVAL);
3313 
3314  pixel_shift = desc->comp[0].depth > 8;
3315 
3316  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3317  s->poc);
3318 
3319  /* the checksums are LE, so we have to byteswap for >8bpp formats
3320  * on BE arches */
3321 #if HAVE_BIGENDIAN
3322  if (pixel_shift && !s->checksum_buf) {
3323  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3324  FFMAX3(frame->linesize[0], frame->linesize[1],
3325  frame->linesize[2]));
3326  if (!s->checksum_buf)
3327  return AVERROR(ENOMEM);
3328  }
3329 #endif
3330 
3331  for (i = 0; frame->data[i]; i++) {
3332  int width = s->avctx->coded_width;
3333  int height = s->avctx->coded_height;
3334  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3335  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3336  uint8_t md5[16];
3337 
3338  av_md5_init(s->md5_ctx);
3339  for (j = 0; j < h; j++) {
3340  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3341 #if HAVE_BIGENDIAN
3342  if (pixel_shift) {
3343  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3344  (const uint16_t *) src, w);
3345  src = s->checksum_buf;
3346  }
3347 #endif
3348  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3349  }
3350  av_md5_final(s->md5_ctx, md5);
3351 
3352  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3353  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3354  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3355  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3356  } else {
3357  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3358  print_md5(s->avctx, AV_LOG_ERROR, md5);
3359  av_log (s->avctx, AV_LOG_ERROR, " != ");
3360  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3361  av_log (s->avctx, AV_LOG_ERROR, "\n");
3362  return AVERROR_INVALIDDATA;
3363  }
3364  }
3365 
3366  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3367 
3368  return 0;
3369 }
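
verify_md5() recomputes one 16-byte MD5 per plane over the visible width only, skipping the linesize padding, and compares it against the value carried by the decoded picture hash SEI. A standalone sketch of the same computation for the simple 8-bit case (hypothetical helper, assuming packed 8-bit planes):

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

static int plane_md5_8bit(const AVFrame *frame, int plane, int w, int h,
                          uint8_t out[16])
{
    struct AVMD5 *ctx = av_md5_alloc();

    if (!ctx)
        return AVERROR(ENOMEM);

    av_md5_init(ctx);
    for (int y = 0; y < h; y++) {
        /* hash only the w visible bytes of each row, not the linesize padding */
        av_md5_update(ctx, frame->data[plane] + y * frame->linesize[plane], w);
    }
    av_md5_final(ctx, out);
    av_free(ctx);
    return 0;
}
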
3370 
3371 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3372 {
3373  int ret, i;
3374 
3375  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3376  &s->nal_length_size, s->avctx->err_recognition,
3377  s->apply_defdispwin, s->avctx);
3378  if (ret < 0)
3379  return ret;
3380 
3381  /* export stream parameters from the first SPS */
3382  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3383  if (first && s->ps.sps_list[i]) {
3384  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3385  export_stream_params(s, sps);
3386  break;
3387  }
3388  }
3389 
3390  /* export stream parameters from SEI */
3391  ret = export_stream_params_from_sei(s);
3392  if (ret < 0)
3393  return ret;
3394 
3395  return 0;
3396 }
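
hevc_decode_extradata() is fed from avctx->extradata at init time (and again through AV_PKT_DATA_NEW_EXTRADATA side data), so hvcC or Annex B parameter sets reach the decoder before the first packet. A minimal sketch (assumption, not part of this file) of how a caller typically gets the extradata there:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Copy the demuxer's codec parameters, including padded extradata, into the
 * decoder context before avcodec_open2(). */
static int setup_from_stream(AVCodecContext *avctx, const AVStream *st)
{
    return avcodec_parameters_to_context(avctx, st->codecpar);
}
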
3397 
3398 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3399  AVPacket *avpkt)
3400 {
3401  int ret;
3402  size_t new_extradata_size;
3403  uint8_t *new_extradata;
3404  HEVCContext *s = avctx->priv_data;
3405 
3406  if (!avpkt->size) {
3407  ret = ff_hevc_output_frame(s, data, 1);
3408  if (ret < 0)
3409  return ret;
3410 
3411  *got_output = ret;
3412  return 0;
3413  }
3414 
3415  new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3416  &new_extradata_size);
3417  if (new_extradata && new_extradata_size > 0) {
3418  ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
3419  if (ret < 0)
3420  return ret;
3421  }
3422 
3423  s->ref = NULL;
3424  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3425  if (ret < 0)
3426  return ret;
3427 
3428  if (avctx->hwaccel) {
3429  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3430  av_log(avctx, AV_LOG_ERROR,
3431  "hardware accelerator failed to decode picture\n");
3432  ff_hevc_unref_frame(s, s->ref, ~0);
3433  return ret;
3434  }
3435  } else {
3436  /* verify the SEI checksum */
3437  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3438  s->sei.picture_hash.is_md5) {
3439  ret = verify_md5(s, s->ref->frame);
3440  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3441  ff_hevc_unref_frame(s, s->ref, ~0);
3442  return ret;
3443  }
3444  }
3445  }
3446  s->sei.picture_hash.is_md5 = 0;
3447 
3448  if (s->is_decoded) {
3449  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3450  s->is_decoded = 0;
3451  }
3452 
3453  if (s->output_frame->buf[0]) {
3454  av_frame_move_ref(data, s->output_frame);
3455  *got_output = 1;
3456  }
3457 
3458  return avpkt->size;
3459 }
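
hevc_decode_frame() implements the legacy decode callback: it returns at most one reordered frame per packet and is called with an empty packet to drain the delayed-output queue. Through the public API this corresponds to the usual send/receive loop, sketched below (hypothetical helper name):

#include <libavcodec/avcodec.h>

/* Feed one packet (or NULL to start draining) and hand every produced frame
 * to 'sink'. Returns 0 on success or a negative AVERROR code. */
static int decode_and_emit(AVCodecContext *avctx, const AVPacket *pkt,
                           AVFrame *frame, void (*sink)(const AVFrame *))
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        sink(frame);
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
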
3460 
3461 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3462 {
3463  int ret;
3464 
3465  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3466  if (ret < 0)
3467  return ret;
3468 
3469  if (src->needs_fg) {
3470  ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
3471  if (ret < 0)
3472  return ret;
3473  dst->needs_fg = 1;
3474  }
3475 
3476  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3477  if (!dst->tab_mvf_buf)
3478  goto fail;
3479  dst->tab_mvf = src->tab_mvf;
3480 
3481  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3482  if (!dst->rpl_tab_buf)
3483  goto fail;
3484  dst->rpl_tab = src->rpl_tab;
3485 
3486  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3487  if (!dst->rpl_buf)
3488  goto fail;
3489 
3490  dst->poc = src->poc;
3491  dst->ctb_count = src->ctb_count;
3492  dst->flags = src->flags;
3493  dst->sequence = src->sequence;
3494 
3495  if (src->hwaccel_picture_private) {
3496  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3497  if (!dst->hwaccel_priv_buf)
3498  goto fail;
3499  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3500  }
3501 
3502  return 0;
3503 fail:
3504  ff_hevc_unref_frame(s, dst, ~0);
3505  return AVERROR(ENOMEM);
3506 }
3507 
3508 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3509 {
3510  HEVCContext *s = avctx->priv_data;
3511  int i;
3512 
3513  pic_arrays_free(s);
3514 
3515  av_freep(&s->md5_ctx);
3516 
3517  av_freep(&s->cabac_state);
3518 
3519  for (i = 0; i < 3; i++) {
3520  av_freep(&s->sao_pixel_buffer_h[i]);
3521  av_freep(&s->sao_pixel_buffer_v[i]);
3522  }
3523  av_frame_free(&s->output_frame);
3524 
3525  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3526  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3527  av_frame_free(&s->DPB[i].frame);
3528  av_frame_free(&s->DPB[i].frame_grain);
3529  }
3530 
3531  ff_hevc_ps_uninit(&s->ps);
3532 
3533  av_freep(&s->sh.entry_point_offset);
3534  av_freep(&s->sh.offset);
3535  av_freep(&s->sh.size);
3536 
3537  if (s->HEVClcList && s->sList) {
3538  for (i = 1; i < s->threads_number; i++) {
3539  av_freep(&s->HEVClcList[i]);
3540  av_freep(&s->sList[i]);
3541  }
3542  }
3543  av_freep(&s->HEVClc);
3544  av_freep(&s->HEVClcList);
3545  av_freep(&s->sList);
3546 
3547  ff_h2645_packet_uninit(&s->pkt);
3548 
3549  ff_hevc_reset_sei(&s->sei);
3550 
3551  return 0;
3552 }
3553 
3554 static av_cold int hevc_init_context(AVCodecContext *avctx)
3555 {
3556  HEVCContext *s = avctx->priv_data;
3557  int i;
3558 
3559  s->avctx = avctx;
3560 
3561  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3562  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3563  s->sList = av_mallocz(sizeof(HEVCContext*) * s->threads_number);
3564  if (!s->HEVClc || !s->HEVClcList || !s->sList)
3565  goto fail;
3566  s->HEVClcList[0] = s->HEVClc;
3567  s->sList[0] = s;
3568 
3569  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3570  if (!s->cabac_state)
3571  goto fail;
3572 
3573  s->output_frame = av_frame_alloc();
3574  if (!s->output_frame)
3575  goto fail;
3576 
3577  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3578  s->DPB[i].frame = av_frame_alloc();
3579  if (!s->DPB[i].frame)
3580  goto fail;
3581  s->DPB[i].tf.f = s->DPB[i].frame;
3582 
3583  s->DPB[i].frame_grain = av_frame_alloc();
3584  if (!s->DPB[i].frame_grain)
3585  goto fail;
3586  s->DPB[i].tf_grain.f = s->DPB[i].frame_grain;
3587  }
3588 
3589  s->max_ra = INT_MAX;
3590 
3591  s->md5_ctx = av_md5_alloc();
3592  if (!s->md5_ctx)
3593  goto fail;
3594 
3595  ff_bswapdsp_init(&s->bdsp);
3596 
3597  s->context_initialized = 1;
3598  s->eos = 0;
3599 
3600  ff_hevc_reset_sei(&s->sei);
3601 
3602  return 0;
3603 
3604 fail:
3605  hevc_decode_free(avctx);
3606  return AVERROR(ENOMEM);
3607 }
3608 
3609 #if HAVE_THREADS
3610 static int hevc_update_thread_context(AVCodecContext *dst,
3611  const AVCodecContext *src)
3612 {
3613  HEVCContext *s = dst->priv_data;
3614  HEVCContext *s0 = src->priv_data;
3615  int i, ret;
3616 
3617  if (!s->context_initialized) {
3618  ret = hevc_init_context(dst);
3619  if (ret < 0)
3620  return ret;
3621  }
3622 
3623  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3624  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3625  if (s0->DPB[i].frame->buf[0]) {
3626  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3627  if (ret < 0)
3628  return ret;
3629  }
3630  }
3631 
3632  if (s->ps.sps != s0->ps.sps)
3633  s->ps.sps = NULL;
3634  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3635  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3636  if (ret < 0)
3637  return ret;
3638  }
3639 
3640  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3641  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3642  if (ret < 0)
3643  return ret;
3644  }
3645 
3646  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3647  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3648  if (ret < 0)
3649  return ret;
3650  }
3651 
3652  if (s->ps.sps != s0->ps.sps)
3653  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3654  return ret;
3655 
3656  s->seq_decode = s0->seq_decode;
3657  s->seq_output = s0->seq_output;
3658  s->pocTid0 = s0->pocTid0;
3659  s->max_ra = s0->max_ra;
3660  s->eos = s0->eos;
3661  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3662 
3663  s->is_nalff = s0->is_nalff;
3664  s->nal_length_size = s0->nal_length_size;
3665 
3666  s->threads_number = s0->threads_number;
3667  s->threads_type = s0->threads_type;
3668 
3669  if (s0->eos) {
3670  s->seq_decode = (s->seq_decode + 1) & 0xff;
3671  s->max_ra = INT_MAX;
3672  }
3673 
3674  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3675  if (ret < 0)
3676  return ret;
3677 
3678  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3679  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3680  s->sei.unregistered.nb_buf_ref = 0;
3681 
3682  if (s0->sei.unregistered.nb_buf_ref) {
3683  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3684  s0->sei.unregistered.nb_buf_ref,
3685  sizeof(*s->sei.unregistered.buf_ref));
3686  if (ret < 0)
3687  return ret;
3688 
3689  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3690  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3691  if (!s->sei.unregistered.buf_ref[i])
3692  return AVERROR(ENOMEM);
3693  s->sei.unregistered.nb_buf_ref++;
3694  }
3695  }
3696 
3697  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3698  if (ret < 0)
3699  return ret;
3700 
3701  s->sei.frame_packing = s0->sei.frame_packing;
3702  s->sei.display_orientation = s0->sei.display_orientation;
3703  s->sei.mastering_display = s0->sei.mastering_display;
3704  s->sei.content_light = s0->sei.content_light;
3705  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3706 
3707  ret = export_stream_params_from_sei(s);
3708  if (ret < 0)
3709  return ret;
3710 
3711  return 0;
3712 }
3713 #endif
3714 
3715 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3716 {
3717  HEVCContext *s = avctx->priv_data;
3718  int ret;
3719 
3720  if(avctx->active_thread_type & FF_THREAD_SLICE)
3721  s->threads_number = avctx->thread_count;
3722  else
3723  s->threads_number = 1;
3724 
3725  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3726  s->threads_type = FF_THREAD_FRAME;
3727  else
3728  s->threads_type = FF_THREAD_SLICE;
3729 
3730  ret = hevc_init_context(avctx);
3731  if (ret < 0)
3732  return ret;
3733 
3734  s->enable_parallel_tiles = 0;
3735  s->sei.picture_timing.picture_struct = 0;
3736  s->eos = 1;
3737 
3738  atomic_init(&s->wpp_err, 0);
3739 
3740  if (!avctx->internal->is_copy) {
3741  if (avctx->extradata_size > 0 && avctx->extradata) {
3742  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3743  if (ret < 0) {
3744  return ret;
3745  }
3746  }
3747  }
3748 
3749  return 0;
3750 }
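
The threading setup above means frame threading is used whenever FF_THREAD_FRAME is active with more than one thread, and slice/WPP threading otherwise. A minimal sketch (assumption, helper name made up) of how a caller selects the mode before opening the decoder:

#include <libavcodec/avcodec.h>

static void request_threads(AVCodecContext *avctx, int nb_threads, int frame_threads)
{
    avctx->thread_count = nb_threads;  /* 0 lets libavcodec choose automatically */
    avctx->thread_type  = frame_threads ? FF_THREAD_FRAME : FF_THREAD_SLICE;
}
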
3751 
3752 static void hevc_decode_flush(AVCodecContext *avctx)
3753 {
3754  HEVCContext *s = avctx->priv_data;
3755  ff_hevc_flush_dpb(s);
3756  ff_hevc_reset_sei(&s->sei);
3757  s->max_ra = INT_MAX;
3758  s->eos = 1;
3759 }
3760 
3761 #define OFFSET(x) offsetof(HEVCContext, x)
3762 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3763 
3764 static const AVOption options[] = {
3765  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3766  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3767  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3768  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3769  { NULL },
3770 };
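
Both option entries point at the same apply_defdispwin field, so either name enables cropping to the default display window signalled in the VUI. A minimal sketch (assumption, hypothetical helper) of enabling it through an options dictionary at open time:

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_with_defdispwin(AVCodecContext *avctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    ret = avcodec_open2(avctx, codec, &opts);
    av_dict_free(&opts);
    return ret;
}
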
3771 
3772 static const AVClass hevc_decoder_class = {
3773  .class_name = "HEVC decoder",
3774  .item_name = av_default_item_name,
3775  .option = options,
3776  .version = LIBAVUTIL_VERSION_INT,
3777 };
3778 
3779 const AVCodec ff_hevc_decoder = {
3780  .name = "hevc",
3781  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3782  .type = AVMEDIA_TYPE_VIDEO,
3783  .id = AV_CODEC_ID_HEVC,
3784  .priv_data_size = sizeof(HEVCContext),
3785  .priv_class = &hevc_decoder_class,
3786  .init = hevc_decode_init,
3787  .close = hevc_decode_free,
3788  .decode = hevc_decode_frame,
3789  .flush = hevc_decode_flush,
3790  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3791  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3792  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3793  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
3794  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3795  .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3796  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3797 #if CONFIG_HEVC_DXVA2_HWACCEL
3798  HWACCEL_DXVA2(hevc),
3799 #endif
3800 #if CONFIG_HEVC_D3D11VA_HWACCEL
3801  HWACCEL_D3D11VA(hevc),
3802 #endif
3803 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3804  HWACCEL_D3D11VA2(hevc),
3805 #endif
3806 #if CONFIG_HEVC_NVDEC_HWACCEL
3807  HWACCEL_NVDEC(hevc),
3808 #endif
3809 #if CONFIG_HEVC_VAAPI_HWACCEL
3810  HWACCEL_VAAPI(hevc),
3811 #endif
3812 #if CONFIG_HEVC_VDPAU_HWACCEL
3813  HWACCEL_VDPAU(hevc),
3814 #endif
3815 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3816  HWACCEL_VIDEOTOOLBOX(hevc),
3817 #endif
3818  NULL
3819  },
3820 };