hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mastering_display_metadata.h"
31 #include "libavutil/md5.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/stereo3d.h"
35 
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "cabac_functions.h"
39 #include "golomb.h"
40 #include "hevc.h"
41 #include "hevc_data.h"
42 #include "hevc_parse.h"
43 #include "hevcdec.h"
44 #include "hwaccel.h"
45 #include "profiles.h"
46 
47 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
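/*
 * Editorial note (illustrative, not part of the decoder): ff_hevc_pel_weight
 * maps a prediction block width to the index of the matching put_hevc_* DSP
 * kernel, e.g.
 *
 * @code
 * int idx   = ff_hevc_pel_weight[16];   // 5 -> selects the 16-pixel-wide kernel
 * int idx64 = ff_hevc_pel_weight[64];   // 9 -> widest kernel
 * @endcode
 *
 * Only the widths listed in the initializer are valid lookups; the remaining
 * entries stay zero-initialized.
 */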
48 
49 /**
50  * NOTE: Each function hls_foo corresponds to the function foo in the
51  * specification (HLS stands for High Level Syntax).
52  */
53 
54 /**
55  * Section 5.7
56  */
57 
58 /* free everything allocated by pic_arrays_init() */
59 static void pic_arrays_free(HEVCContext *s)
60 {
61  av_freep(&s->sao);
62  av_freep(&s->deblock);
63 
64  av_freep(&s->skip_flag);
65  av_freep(&s->tab_ct_depth);
66 
67  av_freep(&s->tab_ipm);
68  av_freep(&s->cbf_luma);
69  av_freep(&s->is_pcm);
70 
71  av_freep(&s->qp_y_tab);
72  av_freep(&s->tab_slice_address);
73  av_freep(&s->filter_slice_edges);
74 
75  av_freep(&s->horizontal_bs);
76  av_freep(&s->vertical_bs);
77 
78  av_freep(&s->sh.entry_point_offset);
79  av_freep(&s->sh.size);
80  av_freep(&s->sh.offset);
81 
82  av_buffer_pool_uninit(&s->tab_mvf_pool);
83  av_buffer_pool_uninit(&s->rpl_tab_pool);
84 }
85 
86 /* allocate arrays that depend on frame dimensions */
87 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
88 {
89  int log2_min_cb_size = sps->log2_min_cb_size;
90  int width = sps->width;
91  int height = sps->height;
92  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
93  ((height >> log2_min_cb_size) + 1);
94  int ctb_count = sps->ctb_width * sps->ctb_height;
95  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
96 
97  s->bs_width = (width >> 2) + 1;
98  s->bs_height = (height >> 2) + 1;
99 
100  s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
101  s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
102  if (!s->sao || !s->deblock)
103  goto fail;
104 
105  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
106  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
107  if (!s->skip_flag || !s->tab_ct_depth)
108  goto fail;
109 
110  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
111  s->tab_ipm = av_mallocz(min_pu_size);
112  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
113  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
114  goto fail;
115 
116  s->filter_slice_edges = av_mallocz(ctb_count);
117  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
118  sizeof(*s->tab_slice_address));
119  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
120  sizeof(*s->qp_y_tab));
121  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
122  goto fail;
123 
124  s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
125  s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height);
126  if (!s->horizontal_bs || !s->vertical_bs)
127  goto fail;
128 
129  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
130  av_buffer_allocz);
131  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
132  av_buffer_allocz);
133  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
134  goto fail;
135 
136  return 0;
137 
138 fail:
139  pic_arrays_free(s);
140  return AVERROR(ENOMEM);
141 }
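/*
 * Worked example (illustrative, assuming a 1920x1080 stream with
 * log2_min_cb_size = 3, i.e. 8x8 minimum coding blocks):
 *
 * @code
 * pic_size_in_ctb = ((1920 >> 3) + 1) * ((1080 >> 3) + 1) = 241 * 136 = 32776
 * bs_width        = (1920 >> 2) + 1 = 481   // boundary-strength grid, 4x4 units
 * bs_height       = (1080 >> 2) + 1 = 271
 * @endcode
 *
 * so qp_y_tab and tab_slice_address hold one entry per minimum coding block
 * (plus a guard row/column), while horizontal_bs/vertical_bs cover the 4x4 grid.
 */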
142 
143 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
144 {
145  int i = 0;
146  int j = 0;
147  uint8_t luma_weight_l0_flag[16];
148  uint8_t chroma_weight_l0_flag[16];
149  uint8_t luma_weight_l1_flag[16];
150  uint8_t chroma_weight_l1_flag[16];
151  int luma_log2_weight_denom;
152 
153  luma_log2_weight_denom = get_ue_golomb_long(gb);
154  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
155  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
156  return AVERROR_INVALIDDATA;
157  }
158  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
159  if (s->ps.sps->chroma_format_idc != 0) {
160  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
161  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
162  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
163  return AVERROR_INVALIDDATA;
164  }
165  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
166  }
167 
168  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
169  luma_weight_l0_flag[i] = get_bits1(gb);
170  if (!luma_weight_l0_flag[i]) {
171  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
172  s->sh.luma_offset_l0[i] = 0;
173  }
174  }
175  if (s->ps.sps->chroma_format_idc != 0) {
176  for (i = 0; i < s->sh.nb_refs[L0]; i++)
177  chroma_weight_l0_flag[i] = get_bits1(gb);
178  } else {
179  for (i = 0; i < s->sh.nb_refs[L0]; i++)
180  chroma_weight_l0_flag[i] = 0;
181  }
182  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
183  if (luma_weight_l0_flag[i]) {
184  int delta_luma_weight_l0 = get_se_golomb(gb);
185  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
186  return AVERROR_INVALIDDATA;
187  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
188  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
189  }
190  if (chroma_weight_l0_flag[i]) {
191  for (j = 0; j < 2; j++) {
192  int delta_chroma_weight_l0 = get_se_golomb(gb);
193  int delta_chroma_offset_l0 = get_se_golomb(gb);
194 
195  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
196  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
197  return AVERROR_INVALIDDATA;
198  }
199 
200  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
201  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
202  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
203  }
204  } else {
205  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
206  s->sh.chroma_offset_l0[i][0] = 0;
207  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
208  s->sh.chroma_offset_l0[i][1] = 0;
209  }
210  }
211  if (s->sh.slice_type == HEVC_SLICE_B) {
212  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
213  luma_weight_l1_flag[i] = get_bits1(gb);
214  if (!luma_weight_l1_flag[i]) {
215  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
216  s->sh.luma_offset_l1[i] = 0;
217  }
218  }
219  if (s->ps.sps->chroma_format_idc != 0) {
220  for (i = 0; i < s->sh.nb_refs[L1]; i++)
221  chroma_weight_l1_flag[i] = get_bits1(gb);
222  } else {
223  for (i = 0; i < s->sh.nb_refs[L1]; i++)
224  chroma_weight_l1_flag[i] = 0;
225  }
226  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
227  if (luma_weight_l1_flag[i]) {
228  int delta_luma_weight_l1 = get_se_golomb(gb);
229  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
230  return AVERROR_INVALIDDATA;
231  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
232  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
233  }
234  if (chroma_weight_l1_flag[i]) {
235  for (j = 0; j < 2; j++) {
236  int delta_chroma_weight_l1 = get_se_golomb(gb);
237  int delta_chroma_offset_l1 = get_se_golomb(gb);
238 
239  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
240  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
241  return AVERROR_INVALIDDATA;
242  }
243 
244  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
245  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
246  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
247  }
248  } else {
249  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
250  s->sh.chroma_offset_l1[i][0] = 0;
251  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
252  s->sh.chroma_offset_l1[i][1] = 0;
253  }
254  }
255  }
256  return 0;
257 }
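/*
 * Illustrative sketch (not a function from this file) of how the parsed
 * weights and offsets are meant to be applied in explicit weighted
 * uni-prediction; the real DSP routines additionally clip to the bit depth:
 *
 * @code
 * static int weighted_luma_sample(int pred, int weight, int offset, int denom)
 * {
 *     // pred is the interpolated sample, weight = (1 << denom) + delta_luma_weight
 *     int rounding = denom ? 1 << (denom - 1) : 0;
 *     return ((pred * weight + rounding) >> denom) + offset;
 * }
 * @endcode
 *
 * The range checks above ((int8_t)delta != delta, the offset bounds) keep
 * these intermediates inside the 16-bit arithmetic used by the DSP code.
 */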
258 
259 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
260 {
261  const HEVCSPS *sps = s->ps.sps;
262  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
263  int prev_delta_msb = 0;
264  unsigned int nb_sps = 0, nb_sh;
265  int i;
266 
267  rps->nb_refs = 0;
268  if (!sps->long_term_ref_pics_present_flag)
269  return 0;
270 
271  if (sps->num_long_term_ref_pics_sps > 0)
272  nb_sps = get_ue_golomb_long(gb);
273  nb_sh = get_ue_golomb_long(gb);
274 
275  if (nb_sps > sps->num_long_term_ref_pics_sps)
276  return AVERROR_INVALIDDATA;
277  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
278  return AVERROR_INVALIDDATA;
279 
280  rps->nb_refs = nb_sh + nb_sps;
281 
282  for (i = 0; i < rps->nb_refs; i++) {
283  uint8_t delta_poc_msb_present;
284 
285  if (i < nb_sps) {
286  uint8_t lt_idx_sps = 0;
287 
288  if (sps->num_long_term_ref_pics_sps > 1)
289  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
290 
291  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
292  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
293  } else {
294  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
295  rps->used[i] = get_bits1(gb);
296  }
297 
298  delta_poc_msb_present = get_bits1(gb);
299  if (delta_poc_msb_present) {
300  int64_t delta = get_ue_golomb_long(gb);
301  int64_t poc;
302 
303  if (i && i != nb_sps)
304  delta += prev_delta_msb;
305 
306  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
307  if (poc != (int32_t)poc)
308  return AVERROR_INVALIDDATA;
309  rps->poc[i] = poc;
310  prev_delta_msb = delta;
311  }
312  }
313 
314  return 0;
315 }
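/*
 * Worked example (illustrative): with log2_max_poc_lsb = 8, max_poc_lsb is
 * 256. For a current POC of 1000 (pic_order_cnt_lsb = 1000 & 255 = 232), a
 * long-term picture signalled with poc lsb 200 and an MSB cycle delta of 1
 * resolves to
 *
 * @code
 * poc = 200 + 1000 - 1 * 256 - 232 = 712
 * @endcode
 *
 * i.e. the lsb from the syntax plus the msb cycle counted back from the
 * current picture; the int32_t check above rejects values that overflow.
 */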
316 
317 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
318 {
319  AVCodecContext *avctx = s->avctx;
320  const HEVCParamSets *ps = &s->ps;
321  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
322  const HEVCWindow *ow = &sps->output_window;
323  unsigned int num = 0, den = 0;
324 
325  avctx->pix_fmt = sps->pix_fmt;
326  avctx->coded_width = sps->width;
327  avctx->coded_height = sps->height;
328  avctx->width = sps->width - ow->left_offset - ow->right_offset;
329  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
330  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
331  avctx->profile = sps->ptl.general_ptl.profile_idc;
332  avctx->level = sps->ptl.general_ptl.level_idc;
333 
334  ff_set_sar(avctx, sps->vui.sar);
335 
336  if (sps->vui.video_signal_type_present_flag)
337  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
338  : AVCOL_RANGE_MPEG;
339  else
340  avctx->color_range = AVCOL_RANGE_MPEG;
341 
342  if (sps->vui.colour_description_present_flag) {
343  avctx->color_primaries = sps->vui.colour_primaries;
344  avctx->color_trc = sps->vui.transfer_characteristic;
345  avctx->colorspace = sps->vui.matrix_coeffs;
346  } else {
347  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
348  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
349  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
350  }
351 
352  if (vps->vps_timing_info_present_flag) {
353  num = vps->vps_num_units_in_tick;
354  den = vps->vps_time_scale;
355  } else if (sps->vui.vui_timing_info_present_flag) {
356  num = sps->vui.vui_num_units_in_tick;
357  den = sps->vui.vui_time_scale;
358  }
359 
360  if (num != 0 && den != 0)
361  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
362  num, den, 1 << 30);
363 
364  if (s->sei.alternative_transfer.present &&
365  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
366  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
367  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
368  }
369 }
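/*
 * Worked example (illustrative): a stream signalling vui_num_units_in_tick =
 * 1001 and vui_time_scale = 60000 yields
 *
 * @code
 * av_reduce(&avctx->framerate.den, &avctx->framerate.num, 1001, 60000, 1 << 30);
 * // avctx->framerate == (AVRational){ 60000, 1001 }   ~ 59.94 fps
 * @endcode
 *
 * Note that num/den are deliberately swapped in the call above: the VUI/VPS
 * carry the tick duration, while AVCodecContext.framerate stores frames per
 * second.
 */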
370 
371 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
372 {
373 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
374  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
375  CONFIG_HEVC_NVDEC_HWACCEL + \
376  CONFIG_HEVC_VAAPI_HWACCEL + \
377  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
378  CONFIG_HEVC_VDPAU_HWACCEL)
379  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
380 
381  switch (sps->pix_fmt) {
382  case AV_PIX_FMT_YUV420P:
383  case AV_PIX_FMT_YUVJ420P:
384 #if CONFIG_HEVC_DXVA2_HWACCEL
385  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
386 #endif
387 #if CONFIG_HEVC_D3D11VA_HWACCEL
388  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
389  *fmt++ = AV_PIX_FMT_D3D11;
390 #endif
391 #if CONFIG_HEVC_VAAPI_HWACCEL
392  *fmt++ = AV_PIX_FMT_VAAPI;
393 #endif
394 #if CONFIG_HEVC_VDPAU_HWACCEL
395  *fmt++ = AV_PIX_FMT_VDPAU;
396 #endif
397 #if CONFIG_HEVC_NVDEC_HWACCEL
398  *fmt++ = AV_PIX_FMT_CUDA;
399 #endif
400 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
401  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
402 #endif
403  break;
404  case AV_PIX_FMT_YUV420P10:
405 #if CONFIG_HEVC_DXVA2_HWACCEL
406  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
407 #endif
408 #if CONFIG_HEVC_D3D11VA_HWACCEL
409  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
410  *fmt++ = AV_PIX_FMT_D3D11;
411 #endif
412 #if CONFIG_HEVC_VAAPI_HWACCEL
413  *fmt++ = AV_PIX_FMT_VAAPI;
414 #endif
415 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
416  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
417 #endif
418 #if CONFIG_HEVC_NVDEC_HWACCEL
419  *fmt++ = AV_PIX_FMT_CUDA;
420 #endif
421  break;
422  case AV_PIX_FMT_YUV444P:
423 #if CONFIG_HEVC_VDPAU_HWACCEL
424  *fmt++ = AV_PIX_FMT_VDPAU;
425 #endif
426 #if CONFIG_HEVC_NVDEC_HWACCEL
427  *fmt++ = AV_PIX_FMT_CUDA;
428 #endif
429  break;
430  case AV_PIX_FMT_YUV420P12:
431  case AV_PIX_FMT_YUV444P10:
432  case AV_PIX_FMT_YUV444P12:
433 #if CONFIG_HEVC_NVDEC_HWACCEL
434  *fmt++ = AV_PIX_FMT_CUDA;
435 #endif
436  break;
437  }
438 
439  *fmt++ = sps->pix_fmt;
440  *fmt = AV_PIX_FMT_NONE;
441 
442  return ff_thread_get_format(s->avctx, pix_fmts);
443 }
444 
445 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
446  enum AVPixelFormat pix_fmt)
447 {
448  int ret, i;
449 
450  pic_arrays_free(s);
451  s->ps.sps = NULL;
452  s->ps.vps = NULL;
453 
454  if (!sps)
455  return 0;
456 
457  ret = pic_arrays_init(s, sps);
458  if (ret < 0)
459  goto fail;
460 
461  export_stream_params(s, sps);
462 
463  s->avctx->pix_fmt = pix_fmt;
464 
465  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
466  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
467  ff_videodsp_init (&s->vdsp, sps->bit_depth);
468 
469  for (i = 0; i < 3; i++) {
470  av_freep(&s->sao_pixel_buffer_h[i]);
471  av_freep(&s->sao_pixel_buffer_v[i]);
472  }
473 
474  if (sps->sao_enabled && !s->avctx->hwaccel) {
475  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
476  int c_idx;
477 
478  for(c_idx = 0; c_idx < c_count; c_idx++) {
479  int w = sps->width >> sps->hshift[c_idx];
480  int h = sps->height >> sps->vshift[c_idx];
481  s->sao_pixel_buffer_h[c_idx] =
482  av_malloc((w * 2 * sps->ctb_height) <<
483  sps->pixel_shift);
484  s->sao_pixel_buffer_v[c_idx] =
485  av_malloc((h * 2 * sps->ctb_width) <<
486  sps->pixel_shift);
487  }
488  }
489 
490  s->ps.sps = sps;
491  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
492 
493  return 0;
494 
495 fail:
496  pic_arrays_free(s);
497  s->ps.sps = NULL;
498  return ret;
499 }
500 
501 static int hls_slice_header(HEVCContext *s)
502 {
503  GetBitContext *gb = &s->HEVClc->gb;
504  SliceHeader *sh = &s->sh;
505  int i, ret;
506 
507  // Coded parameters
508  sh->first_slice_in_pic_flag = get_bits1(gb);
509  if (s->ref && sh->first_slice_in_pic_flag) {
510  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
511  return 1; // This slice will be skipped later, do not corrupt state
512  }
513 
514  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
515  s->seq_decode = (s->seq_decode + 1) & 0xff;
516  s->max_ra = INT_MAX;
517  if (IS_IDR(s))
518  ff_hevc_clear_refs(s);
519  }
520  sh->no_output_of_prior_pics_flag = 0;
521  if (IS_IRAP(s))
522  sh->no_output_of_prior_pics_flag = get_bits1(gb);
523 
524  sh->pps_id = get_ue_golomb_long(gb);
525  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
526  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
527  return AVERROR_INVALIDDATA;
528  }
529  if (!sh->first_slice_in_pic_flag &&
530  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
531  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
532  return AVERROR_INVALIDDATA;
533  }
534  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
535  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
536  sh->no_output_of_prior_pics_flag = 1;
537 
538  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
539  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
540  const HEVCSPS *last_sps = s->ps.sps;
541  enum AVPixelFormat pix_fmt;
542 
543  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
544  if (sps->width != last_sps->width || sps->height != last_sps->height ||
545  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
546  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
547  sh->no_output_of_prior_pics_flag = 0;
548  }
549  ff_hevc_clear_refs(s);
550 
551  ret = set_sps(s, sps, sps->pix_fmt);
552  if (ret < 0)
553  return ret;
554 
555  pix_fmt = get_format(s, sps);
556  if (pix_fmt < 0)
557  return pix_fmt;
558  s->avctx->pix_fmt = pix_fmt;
559 
560  s->seq_decode = (s->seq_decode + 1) & 0xff;
561  s->max_ra = INT_MAX;
562  }
563 
564  sh->dependent_slice_segment_flag = 0;
565  if (!sh->first_slice_in_pic_flag) {
566  int slice_address_length;
567 
568  if (s->ps.pps->dependent_slice_segments_enabled_flag)
569  sh->dependent_slice_segment_flag = get_bits1(gb);
570 
571  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
572  s->ps.sps->ctb_height);
573  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
574  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
575  av_log(s->avctx, AV_LOG_ERROR,
576  "Invalid slice segment address: %u.\n",
577  sh->slice_segment_addr);
578  return AVERROR_INVALIDDATA;
579  }
580 
581  if (!sh->dependent_slice_segment_flag) {
582  sh->slice_addr = sh->slice_segment_addr;
583  s->slice_idx++;
584  }
585  } else {
586  sh->slice_segment_addr = sh->slice_addr = 0;
587  s->slice_idx = 0;
588  s->slice_initialized = 0;
589  }
590 
591  if (!sh->dependent_slice_segment_flag) {
592  s->slice_initialized = 0;
593 
594  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
595  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
596 
597  sh->slice_type = get_ue_golomb_long(gb);
598  if (!(sh->slice_type == HEVC_SLICE_I ||
599  sh->slice_type == HEVC_SLICE_P ||
600  sh->slice_type == HEVC_SLICE_B)) {
601  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
602  sh->slice_type);
603  return AVERROR_INVALIDDATA;
604  }
605  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
606  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
607  return AVERROR_INVALIDDATA;
608  }
609 
610  // when flag is not present, picture is inferred to be output
611  sh->pic_output_flag = 1;
612  if (s->ps.pps->output_flag_present_flag)
613  sh->pic_output_flag = get_bits1(gb);
614 
615  if (s->ps.sps->separate_colour_plane_flag)
616  sh->colour_plane_id = get_bits(gb, 2);
617 
618  if (!IS_IDR(s)) {
619  int poc, pos;
620 
621  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
622  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
623  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
624  av_log(s->avctx, AV_LOG_WARNING,
625  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
626  if (s->avctx->err_recognition & AV_EF_EXPLODE)
627  return AVERROR_INVALIDDATA;
628  poc = s->poc;
629  }
630  s->poc = poc;
631 
632  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
633  pos = get_bits_left(gb);
634  if (!sh->short_term_ref_pic_set_sps_flag) {
635  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
636  if (ret < 0)
637  return ret;
638 
639  sh->short_term_rps = &sh->slice_rps;
640  } else {
641  int numbits, rps_idx;
642 
643  if (!s->ps.sps->nb_st_rps) {
644  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
645  return AVERROR_INVALIDDATA;
646  }
647 
648  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
649  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
650  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
651  }
652  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
653 
654  pos = get_bits_left(gb);
655  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
656  if (ret < 0) {
657  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
658  if (s->avctx->err_recognition & AV_EF_EXPLODE)
659  return AVERROR_INVALIDDATA;
660  }
661  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
662 
663  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
664  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
665  else
666  sh->slice_temporal_mvp_enabled_flag = 0;
667  } else {
668  s->sh.short_term_rps = NULL;
669  s->poc = 0;
670  }
671 
672  /* 8.3.1 */
673  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
674  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
675  s->nal_unit_type != HEVC_NAL_TSA_N &&
676  s->nal_unit_type != HEVC_NAL_STSA_N &&
677  s->nal_unit_type != HEVC_NAL_RADL_N &&
678  s->nal_unit_type != HEVC_NAL_RADL_R &&
679  s->nal_unit_type != HEVC_NAL_RASL_N &&
680  s->nal_unit_type != HEVC_NAL_RASL_R)
681  s->pocTid0 = s->poc;
682 
683  if (s->ps.sps->sao_enabled) {
684  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
685  if (s->ps.sps->chroma_format_idc) {
686  sh->slice_sample_adaptive_offset_flag[1] =
687  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
688  }
689  } else {
690  sh->slice_sample_adaptive_offset_flag[0] = 0;
691  sh->slice_sample_adaptive_offset_flag[1] = 0;
692  sh->slice_sample_adaptive_offset_flag[2] = 0;
693  }
694 
695  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
696  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
697  int nb_refs;
698 
699  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
700  if (sh->slice_type == HEVC_SLICE_B)
701  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
702 
703  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
704  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
705  if (sh->slice_type == HEVC_SLICE_B)
706  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
707  }
708  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
709  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
710  sh->nb_refs[L0], sh->nb_refs[L1]);
711  return AVERROR_INVALIDDATA;
712  }
713 
714  sh->rpl_modification_flag[0] = 0;
715  sh->rpl_modification_flag[1] = 0;
716  nb_refs = ff_hevc_frame_nb_refs(s);
717  if (!nb_refs) {
718  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
719  return AVERROR_INVALIDDATA;
720  }
721 
722  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
723  sh->rpl_modification_flag[0] = get_bits1(gb);
724  if (sh->rpl_modification_flag[0]) {
725  for (i = 0; i < sh->nb_refs[L0]; i++)
726  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
727  }
728 
729  if (sh->slice_type == HEVC_SLICE_B) {
730  sh->rpl_modification_flag[1] = get_bits1(gb);
731  if (sh->rpl_modification_flag[1] == 1)
732  for (i = 0; i < sh->nb_refs[L1]; i++)
733  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
734  }
735  }
736 
737  if (sh->slice_type == HEVC_SLICE_B)
738  sh->mvd_l1_zero_flag = get_bits1(gb);
739 
740  if (s->ps.pps->cabac_init_present_flag)
741  sh->cabac_init_flag = get_bits1(gb);
742  else
743  sh->cabac_init_flag = 0;
744 
745  sh->collocated_ref_idx = 0;
746  if (sh->slice_temporal_mvp_enabled_flag) {
747  sh->collocated_list = L0;
748  if (sh->slice_type == HEVC_SLICE_B)
749  sh->collocated_list = !get_bits1(gb);
750 
751  if (sh->nb_refs[sh->collocated_list] > 1) {
752  sh->collocated_ref_idx = get_ue_golomb_long(gb);
753  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
754  av_log(s->avctx, AV_LOG_ERROR,
755  "Invalid collocated_ref_idx: %d.\n",
756  sh->collocated_ref_idx);
757  return AVERROR_INVALIDDATA;
758  }
759  }
760  }
761 
762  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
763  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
764  int ret = pred_weight_table(s, gb);
765  if (ret < 0)
766  return ret;
767  }
768 
769  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
770  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
771  av_log(s->avctx, AV_LOG_ERROR,
772  "Invalid number of merging MVP candidates: %d.\n",
773  sh->max_num_merge_cand);
774  return AVERROR_INVALIDDATA;
775  }
776  }
777 
778  sh->slice_qp_delta = get_se_golomb(gb);
779 
780  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
781  sh->slice_cb_qp_offset = get_se_golomb(gb);
782  sh->slice_cr_qp_offset = get_se_golomb(gb);
783  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
784  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
785  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
786  return AVERROR_INVALIDDATA;
787  }
788  } else {
789  sh->slice_cb_qp_offset = 0;
790  sh->slice_cr_qp_offset = 0;
791  }
792 
793  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
794  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
795  else
796  sh->cu_chroma_qp_offset_enabled_flag = 0;
797 
798  if (s->ps.pps->deblocking_filter_control_present_flag) {
799  int deblocking_filter_override_flag = 0;
800 
801  if (s->ps.pps->deblocking_filter_override_enabled_flag)
802  deblocking_filter_override_flag = get_bits1(gb);
803 
804  if (deblocking_filter_override_flag) {
805  sh->disable_deblocking_filter_flag = get_bits1(gb);
806  if (!sh->disable_deblocking_filter_flag) {
807  int beta_offset_div2 = get_se_golomb(gb);
808  int tc_offset_div2 = get_se_golomb(gb) ;
809  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
810  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
811  av_log(s->avctx, AV_LOG_ERROR,
812  "Invalid deblock filter offsets: %d, %d\n",
813  beta_offset_div2, tc_offset_div2);
814  return AVERROR_INVALIDDATA;
815  }
816  sh->beta_offset = beta_offset_div2 * 2;
817  sh->tc_offset = tc_offset_div2 * 2;
818  }
819  } else {
820  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
821  sh->beta_offset = s->ps.pps->beta_offset;
822  sh->tc_offset = s->ps.pps->tc_offset;
823  }
824  } else {
825  sh->disable_deblocking_filter_flag = 0;
826  sh->beta_offset = 0;
827  sh->tc_offset = 0;
828  }
829 
830  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
831  (sh->slice_sample_adaptive_offset_flag[0] ||
832  sh->slice_sample_adaptive_offset_flag[1] ||
833  !sh->disable_deblocking_filter_flag)) {
834  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
835  } else {
836  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
837  }
838  } else if (!s->slice_initialized) {
839  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
840  return AVERROR_INVALIDDATA;
841  }
842 
843  sh->num_entry_point_offsets = 0;
844  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
845  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
846  // It would be possible to bound this tighter but this here is simpler
847  if (num_entry_point_offsets > get_bits_left(gb)) {
848  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
849  return AVERROR_INVALIDDATA;
850  }
851 
852  sh->num_entry_point_offsets = num_entry_point_offsets;
853  if (sh->num_entry_point_offsets > 0) {
854  int offset_len = get_ue_golomb_long(gb) + 1;
855 
856  if (offset_len < 1 || offset_len > 32) {
857  sh->num_entry_point_offsets = 0;
858  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
859  return AVERROR_INVALIDDATA;
860  }
861 
862  av_freep(&sh->entry_point_offset);
863  av_freep(&sh->offset);
864  av_freep(&sh->size);
865  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
866  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
867  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
868  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
869  sh->num_entry_point_offsets = 0;
870  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
871  return AVERROR(ENOMEM);
872  }
873  for (i = 0; i < sh->num_entry_point_offsets; i++) {
874  unsigned val = get_bits_long(gb, offset_len);
875  sh->entry_point_offset[i] = val + 1; // +1 to get the size
876  }
877  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
878  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
879  s->threads_number = 1;
880  } else
881  s->enable_parallel_tiles = 0;
882  } else
883  s->enable_parallel_tiles = 0;
884  }
885 
886  if (s->ps.pps->slice_header_extension_present_flag) {
887  unsigned int length = get_ue_golomb_long(gb);
888  if (length*8LL > get_bits_left(gb)) {
889  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
890  return AVERROR_INVALIDDATA;
891  }
892  for (i = 0; i < length; i++)
893  skip_bits(gb, 8); // slice_header_extension_data_byte
894  }
895 
896  // Inferred parameters
897  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
898  if (sh->slice_qp > 51 ||
899  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
900  av_log(s->avctx, AV_LOG_ERROR,
901  "The slice_qp %d is outside the valid range "
902  "[%d, 51].\n",
903  sh->slice_qp,
904  -s->ps.sps->qp_bd_offset);
905  return AVERROR_INVALIDDATA;
906  }
907 
908  s->sh.slice_ctb_addr_rs = sh->slice_segment_addr;
909 
910  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
911  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
912  return AVERROR_INVALIDDATA;
913  }
914 
915  if (get_bits_left(gb) < 0) {
916  av_log(s->avctx, AV_LOG_ERROR,
917  "Overread slice header by %d bits\n", -get_bits_left(gb));
918  return AVERROR_INVALIDDATA;
919  }
920 
921  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
922 
923  if (!s->ps.pps->cu_qp_delta_enabled_flag)
924  s->HEVClc->qp_y = s->sh.slice_qp;
925 
926  s->slice_initialized = 1;
927  s->HEVClc->tu.cu_qp_offset_cb = 0;
928  s->HEVClc->tu.cu_qp_offset_cr = 0;
929 
930  return 0;
931 }
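/*
 * Worked example (illustrative): for a 10-bit stream qp_bd_offset is
 * 6 * (10 - 8) = 12, so the range check above accepts slice_qp in [-12, 51].
 * With pic_init_qp_minus26 = 0 and slice_qp_delta = 4:
 *
 * @code
 * sh->slice_qp = 26 + 0 + 4;   // 30, inside [-qp_bd_offset, 51]
 * @endcode
 */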
932 
933 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
934 
935 #define SET_SAO(elem, value) \
936 do { \
937  if (!sao_merge_up_flag && !sao_merge_left_flag) \
938  sao->elem = value; \
939  else if (sao_merge_left_flag) \
940  sao->elem = CTB(s->sao, rx-1, ry).elem; \
941  else if (sao_merge_up_flag) \
942  sao->elem = CTB(s->sao, rx, ry-1).elem; \
943  else \
944  sao->elem = 0; \
945 } while (0)
946 
947 static void hls_sao_param(HEVCContext *s, int rx, int ry)
948 {
949  HEVCLocalContext *lc = s->HEVClc;
950  int sao_merge_left_flag = 0;
951  int sao_merge_up_flag = 0;
952  SAOParams *sao = &CTB(s->sao, rx, ry);
953  int c_idx, i;
954 
955  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
956  s->sh.slice_sample_adaptive_offset_flag[1]) {
957  if (rx > 0) {
958  if (lc->ctb_left_flag)
959  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
960  }
961  if (ry > 0 && !sao_merge_left_flag) {
962  if (lc->ctb_up_flag)
963  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
964  }
965  }
966 
967  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
968  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
969  s->ps.pps->log2_sao_offset_scale_chroma;
970 
971  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
972  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
973  continue;
974  }
975 
976  if (c_idx == 2) {
977  sao->type_idx[2] = sao->type_idx[1];
978  sao->eo_class[2] = sao->eo_class[1];
979  } else {
980  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
981  }
982 
983  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
984  continue;
985 
986  for (i = 0; i < 4; i++)
987  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
988 
989  if (sao->type_idx[c_idx] == SAO_BAND) {
990  for (i = 0; i < 4; i++) {
991  if (sao->offset_abs[c_idx][i]) {
992  SET_SAO(offset_sign[c_idx][i],
993  ff_hevc_sao_offset_sign_decode(s));
994  } else {
995  sao->offset_sign[c_idx][i] = 0;
996  }
997  }
998  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
999  } else if (c_idx != 2) {
1000  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1001  }
1002 
1003  // Inferred parameters
1004  sao->offset_val[c_idx][0] = 0;
1005  for (i = 0; i < 4; i++) {
1006  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1007  if (sao->type_idx[c_idx] == SAO_EDGE) {
1008  if (i > 1)
1009  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1010  } else if (sao->offset_sign[c_idx][i]) {
1011  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1012  }
1013  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1014  }
1015  }
1016 }
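/*
 * Worked example (illustrative) of the inferred-parameter loop above for a
 * band-offset CTB with log2_sao_offset_scale = 0:
 *
 * @code
 * // offset_abs  = {3, 1, 0, 2}, offset_sign = {1, 0, 0, 1}
 * // offset_val  = {0, -3, 1, 0, -2}   (index 0 is always 0)
 * @endcode
 *
 * For SAO_EDGE the last two offsets are negated unconditionally instead of
 * using a parsed sign, matching the fixed edge-offset polarity of the spec.
 */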
1017 
1018 #undef SET_SAO
1019 #undef CTB
1020 
1021 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1022  HEVCLocalContext *lc = s->HEVClc;
1023  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1024 
1025  if (log2_res_scale_abs_plus1 != 0) {
1026  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1027  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1028  (1 - 2 * res_scale_sign_flag);
1029  } else {
1030  lc->tu.res_scale_val = 0;
1031  }
1032 
1033 
1034  return 0;
1035 }
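/*
 * Worked example (illustrative): log2_res_scale_abs_plus1 = 3 and
 * res_scale_sign_flag = 1 give
 *
 * @code
 * lc->tu.res_scale_val = (1 << 2) * (1 - 2 * 1);   // -4
 * @endcode
 *
 * which hls_transform_unit() later applies to the luma residual as
 * (res_scale_val * coeffs_y[i]) >> 3 before adding it to the chroma plane.
 */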
1036 
1037 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1038  int xBase, int yBase, int cb_xBase, int cb_yBase,
1039  int log2_cb_size, int log2_trafo_size,
1040  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1041 {
1042  HEVCLocalContext *lc = s->HEVClc;
1043  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1044  int i;
1045 
1046  if (lc->cu.pred_mode == MODE_INTRA) {
1047  int trafo_size = 1 << log2_trafo_size;
1048  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1049 
1050  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1051  }
1052 
1053  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1054  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1055  int scan_idx = SCAN_DIAG;
1056  int scan_idx_c = SCAN_DIAG;
1057  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1058  (s->ps.sps->chroma_format_idc == 2 &&
1059  (cbf_cb[1] || cbf_cr[1]));
1060 
1061  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1062  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1063  if (lc->tu.cu_qp_delta != 0)
1064  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1065  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1066  lc->tu.is_cu_qp_delta_coded = 1;
1067 
1068  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1069  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1070  av_log(s->avctx, AV_LOG_ERROR,
1071  "The cu_qp_delta %d is outside the valid range "
1072  "[%d, %d].\n",
1073  lc->tu.cu_qp_delta,
1074  -(26 + s->ps.sps->qp_bd_offset / 2),
1075  (25 + s->ps.sps->qp_bd_offset / 2));
1076  return AVERROR_INVALIDDATA;
1077  }
1078 
1079  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1080  }
1081 
1082  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1083  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1084  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1085  if (cu_chroma_qp_offset_flag) {
1086  int cu_chroma_qp_offset_idx = 0;
1087  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1088  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1089  av_log(s->avctx, AV_LOG_ERROR,
1090  "cu_chroma_qp_offset_idx not yet tested.\n");
1091  }
1092  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1093  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1094  } else {
1095  lc->tu.cu_qp_offset_cb = 0;
1096  lc->tu.cu_qp_offset_cr = 0;
1097  }
1098  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1099  }
1100 
1101  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1102  if (lc->tu.intra_pred_mode >= 6 &&
1103  lc->tu.intra_pred_mode <= 14) {
1104  scan_idx = SCAN_VERT;
1105  } else if (lc->tu.intra_pred_mode >= 22 &&
1106  lc->tu.intra_pred_mode <= 30) {
1107  scan_idx = SCAN_HORIZ;
1108  }
1109 
1110  if (lc->tu.intra_pred_mode_c >= 6 &&
1111  lc->tu.intra_pred_mode_c <= 14) {
1112  scan_idx_c = SCAN_VERT;
1113  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1114  lc->tu.intra_pred_mode_c <= 30) {
1115  scan_idx_c = SCAN_HORIZ;
1116  }
1117  }
1118 
1119  lc->tu.cross_pf = 0;
1120 
1121  if (cbf_luma)
1122  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1123  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1124  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1125  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1126  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1127  (lc->cu.pred_mode == MODE_INTER ||
1128  (lc->tu.chroma_mode_c == 4)));
1129 
1130  if (lc->tu.cross_pf) {
1131  hls_cross_component_pred(s, 0);
1132  }
1133  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1134  if (lc->cu.pred_mode == MODE_INTRA) {
1135  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1136  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1137  }
1138  if (cbf_cb[i])
1139  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1140  log2_trafo_size_c, scan_idx_c, 1);
1141  else
1142  if (lc->tu.cross_pf) {
1143  ptrdiff_t stride = s->frame->linesize[1];
1144  int hshift = s->ps.sps->hshift[1];
1145  int vshift = s->ps.sps->vshift[1];
1146  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1147  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1148  int size = 1 << log2_trafo_size_c;
1149 
1150  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1151  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1152  for (i = 0; i < (size * size); i++) {
1153  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1154  }
1155  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1156  }
1157  }
1158 
1159  if (lc->tu.cross_pf) {
1160  hls_cross_component_pred(s, 1);
1161  }
1162  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1163  if (lc->cu.pred_mode == MODE_INTRA) {
1164  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1165  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1166  }
1167  if (cbf_cr[i])
1168  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1169  log2_trafo_size_c, scan_idx_c, 2);
1170  else
1171  if (lc->tu.cross_pf) {
1172  ptrdiff_t stride = s->frame->linesize[2];
1173  int hshift = s->ps.sps->hshift[2];
1174  int vshift = s->ps.sps->vshift[2];
1175  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1176  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1177  int size = 1 << log2_trafo_size_c;
1178 
1179  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1180  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1181  for (i = 0; i < (size * size); i++) {
1182  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1183  }
1184  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1185  }
1186  }
1187  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1188  int trafo_size_h = 1 << (log2_trafo_size + 1);
1189  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1190  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1191  if (lc->cu.pred_mode == MODE_INTRA) {
1192  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1193  trafo_size_h, trafo_size_v);
1194  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1195  }
1196  if (cbf_cb[i])
1197  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1198  log2_trafo_size, scan_idx_c, 1);
1199  }
1200  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1201  if (lc->cu.pred_mode == MODE_INTRA) {
1202  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1203  trafo_size_h, trafo_size_v);
1204  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1205  }
1206  if (cbf_cr[i])
1207  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1208  log2_trafo_size, scan_idx_c, 2);
1209  }
1210  }
1211  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1212  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1213  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1214  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1215  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1216  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1217  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1218  if (s->ps.sps->chroma_format_idc == 2) {
1219  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1220  trafo_size_h, trafo_size_v);
1221  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1222  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1223  }
1224  } else if (blk_idx == 3) {
1225  int trafo_size_h = 1 << (log2_trafo_size + 1);
1226  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1227  ff_hevc_set_neighbour_available(s, xBase, yBase,
1228  trafo_size_h, trafo_size_v);
1229  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1230  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1231  if (s->ps.sps->chroma_format_idc == 2) {
1232  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1233  trafo_size_h, trafo_size_v);
1234  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1235  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1236  }
1237  }
1238  }
1239 
1240  return 0;
1241 }
1242 
1243 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1244 {
1245  int cb_size = 1 << log2_cb_size;
1246  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1247 
1248  int min_pu_width = s->ps.sps->min_pu_width;
1249  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1250  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1251  int i, j;
1252 
1253  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1254  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1255  s->is_pcm[i + j * min_pu_width] = 2;
1256 }
1257 
1258 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1259  int xBase, int yBase, int cb_xBase, int cb_yBase,
1260  int log2_cb_size, int log2_trafo_size,
1261  int trafo_depth, int blk_idx,
1262  const int *base_cbf_cb, const int *base_cbf_cr)
1263 {
1264  HEVCLocalContext *lc = s->HEVClc;
1265  uint8_t split_transform_flag;
1266  int cbf_cb[2];
1267  int cbf_cr[2];
1268  int ret;
1269 
1270  cbf_cb[0] = base_cbf_cb[0];
1271  cbf_cb[1] = base_cbf_cb[1];
1272  cbf_cr[0] = base_cbf_cr[0];
1273  cbf_cr[1] = base_cbf_cr[1];
1274 
1275  if (lc->cu.intra_split_flag) {
1276  if (trafo_depth == 1) {
1277  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1278  if (s->ps.sps->chroma_format_idc == 3) {
1279  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1280  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1281  } else {
1282  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1283  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1284  }
1285  }
1286  } else {
1287  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1288  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1289  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1290  }
1291 
1292  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1293  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1294  trafo_depth < lc->cu.max_trafo_depth &&
1295  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1296  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1297  } else {
1298  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1299  lc->cu.pred_mode == MODE_INTER &&
1300  lc->cu.part_mode != PART_2Nx2N &&
1301  trafo_depth == 0;
1302 
1303  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1304  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1305  inter_split;
1306  }
1307 
1308  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1309  if (trafo_depth == 0 || cbf_cb[0]) {
1310  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1311  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1312  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1313  }
1314  }
1315 
1316  if (trafo_depth == 0 || cbf_cr[0]) {
1317  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1318  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1319  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1320  }
1321  }
1322  }
1323 
1324  if (split_transform_flag) {
1325  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1326  const int x1 = x0 + trafo_size_split;
1327  const int y1 = y0 + trafo_size_split;
1328 
1329 #define SUBDIVIDE(x, y, idx) \
1330 do { \
1331  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1332  log2_trafo_size - 1, trafo_depth + 1, idx, \
1333  cbf_cb, cbf_cr); \
1334  if (ret < 0) \
1335  return ret; \
1336 } while (0)
1337 
1338  SUBDIVIDE(x0, y0, 0);
1339  SUBDIVIDE(x1, y0, 1);
1340  SUBDIVIDE(x0, y1, 2);
1341  SUBDIVIDE(x1, y1, 3);
1342 
1343 #undef SUBDIVIDE
1344  } else {
1345  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1346  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1347  int min_tu_width = s->ps.sps->min_tb_width;
1348  int cbf_luma = 1;
1349 
1350  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1351  cbf_cb[0] || cbf_cr[0] ||
1352  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1353  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1354  }
1355 
1356  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1357  log2_cb_size, log2_trafo_size,
1358  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1359  if (ret < 0)
1360  return ret;
1361  // TODO: store cbf_luma somewhere else
1362  if (cbf_luma) {
1363  int i, j;
1364  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1365  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1366  int x_tu = (x0 + j) >> log2_min_tu_size;
1367  int y_tu = (y0 + i) >> log2_min_tu_size;
1368  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1369  }
1370  }
1371  if (!s->sh.disable_deblocking_filter_flag) {
1372  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1373  if (s->ps.pps->transquant_bypass_enable_flag &&
1374  lc->cu.cu_transquant_bypass_flag)
1375  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1376  }
1377  }
1378  return 0;
1379 }
1380 
1381 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1382 {
1383  HEVCLocalContext *lc = s->HEVClc;
1384  GetBitContext gb;
1385  int cb_size = 1 << log2_cb_size;
1386  ptrdiff_t stride0 = s->frame->linesize[0];
1387  ptrdiff_t stride1 = s->frame->linesize[1];
1388  ptrdiff_t stride2 = s->frame->linesize[2];
1389  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1390  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1391  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1392 
1393  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1394  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1395  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1396  s->ps.sps->pcm.bit_depth_chroma;
1397  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1398  int ret;
1399 
1400  if (!s->sh.disable_deblocking_filter_flag)
1401  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1402 
1403  ret = init_get_bits(&gb, pcm, length);
1404  if (ret < 0)
1405  return ret;
1406 
1407  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1408  if (s->ps.sps->chroma_format_idc) {
1409  s->hevcdsp.put_pcm(dst1, stride1,
1410  cb_size >> s->ps.sps->hshift[1],
1411  cb_size >> s->ps.sps->vshift[1],
1412  &gb, s->ps.sps->pcm.bit_depth_chroma);
1413  s->hevcdsp.put_pcm(dst2, stride2,
1414  cb_size >> s->ps.sps->hshift[2],
1415  cb_size >> s->ps.sps->vshift[2],
1416  &gb, s->ps.sps->pcm.bit_depth_chroma);
1417  }
1418 
1419  return 0;
1420 }
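/*
 * Worked example (illustrative): an 8x8 PCM block in 4:2:0 with
 * pcm.bit_depth = 8 and pcm.bit_depth_chroma = 8 occupies
 *
 * @code
 * length = 64 * 8 + ((4 * 4) + (4 * 4)) * 8 = 512 + 256 = 768 bits   // 96 bytes
 * @endcode
 *
 * skip_bytes() advances the CABAC reader past those bytes so the samples can
 * be re-read bit-exactly with a plain GetBitContext.
 */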
1421 
1422 /**
1423  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1424  *
1425  * @param s HEVC decoding context
1426  * @param dst target buffer for block data at block position
1427  * @param dststride stride of the dst buffer
1428  * @param ref reference picture buffer at origin (0, 0)
1429  * @param mv motion vector (relative to block position) to get pixel data from
1430  * @param x_off horizontal position of block from origin (0, 0)
1431  * @param y_off vertical position of block from origin (0, 0)
1432  * @param block_w width of block
1433  * @param block_h height of block
1434  * @param luma_weight weighting factor applied to the luma prediction
1435  * @param luma_offset additive offset applied to the luma prediction value
1436  */
1437 
1438 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1439  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1440  int block_w, int block_h, int luma_weight, int luma_offset)
1441 {
1442  HEVCLocalContext *lc = s->HEVClc;
1443  uint8_t *src = ref->data[0];
1444  ptrdiff_t srcstride = ref->linesize[0];
1445  int pic_width = s->ps.sps->width;
1446  int pic_height = s->ps.sps->height;
1447  int mx = mv->x & 3;
1448  int my = mv->y & 3;
1449  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1450  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1451  int idx = ff_hevc_pel_weight[block_w];
1452 
1453  x_off += mv->x >> 2;
1454  y_off += mv->y >> 2;
1455  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1456 
1457  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1458  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1459  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1460  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1461  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1462  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1463 
1464  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1465  edge_emu_stride, srcstride,
1466  block_w + QPEL_EXTRA,
1467  block_h + QPEL_EXTRA,
1468  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1469  pic_width, pic_height);
1470  src = lc->edge_emu_buffer + buf_offset;
1471  srcstride = edge_emu_stride;
1472  }
1473 
1474  if (!weight_flag)
1475  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1476  block_h, mx, my, block_w);
1477  else
1478  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1479  block_h, s->sh.luma_log2_weight_denom,
1480  luma_weight, luma_offset, mx, my, block_w);
1481 }
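/*
 * Worked example (illustrative): luma motion vectors are in quarter-pel
 * units, so mv->x = 13 splits into
 *
 * @code
 * x_off += 13 >> 2;   // +3 full-pel columns
 * mx     = 13 & 3;    // fractional phase 1 -> quarter-pel filter column 1
 * @endcode
 *
 * The !!mx / !!my indexing above only distinguishes "integer" from "needs
 * interpolation"; the exact phase is passed to the kernel as mx/my.
 */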
1482 
1483 /**
1484  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1485  *
1486  * @param s HEVC decoding context
1487  * @param dst target buffer for block data at block position
1488  * @param dststride stride of the dst buffer
1489  * @param ref0 reference picture0 buffer at origin (0, 0)
1490  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1491  * @param x_off horizontal position of block from origin (0, 0)
1492  * @param y_off vertical position of block from origin (0, 0)
1493  * @param block_w width of block
1494  * @param block_h height of block
1495  * @param ref1 reference picture1 buffer at origin (0, 0)
1496  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1497  * @param current_mv current motion vector structure
1498  */
1499  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1500  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1501  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1502 {
1503  HEVCLocalContext *lc = s->HEVClc;
1504  ptrdiff_t src0stride = ref0->linesize[0];
1505  ptrdiff_t src1stride = ref1->linesize[0];
1506  int pic_width = s->ps.sps->width;
1507  int pic_height = s->ps.sps->height;
1508  int mx0 = mv0->x & 3;
1509  int my0 = mv0->y & 3;
1510  int mx1 = mv1->x & 3;
1511  int my1 = mv1->y & 3;
1512  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1513  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1514  int x_off0 = x_off + (mv0->x >> 2);
1515  int y_off0 = y_off + (mv0->y >> 2);
1516  int x_off1 = x_off + (mv1->x >> 2);
1517  int y_off1 = y_off + (mv1->y >> 2);
1518  int idx = ff_hevc_pel_weight[block_w];
1519 
1520  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1521  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1522 
1523  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1524  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1525  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1526  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1527  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1528  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1529 
1530  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1531  edge_emu_stride, src0stride,
1532  block_w + QPEL_EXTRA,
1533  block_h + QPEL_EXTRA,
1534  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1535  pic_width, pic_height);
1536  src0 = lc->edge_emu_buffer + buf_offset;
1537  src0stride = edge_emu_stride;
1538  }
1539 
1540  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1541  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1542  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1543  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1544  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1545  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1546 
1547  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1548  edge_emu_stride, src1stride,
1549  block_w + QPEL_EXTRA,
1550  block_h + QPEL_EXTRA,
1551  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1552  pic_width, pic_height);
1553  src1 = lc->edge_emu_buffer2 + buf_offset;
1554  src1stride = edge_emu_stride;
1555  }
1556 
1557  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1558  block_h, mx0, my0, block_w);
1559  if (!weight_flag)
1560  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1561  block_h, mx1, my1, block_w);
1562  else
1563  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1564  block_h, s->sh.luma_log2_weight_denom,
1565  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1566  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1567  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1568  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1569  mx1, my1, block_w);
1570 
1571 }
1572 
1573 /**
1574  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1575  *
1576  * @param s HEVC decoding context
1577  * @param dst1 target buffer for block data at block position (U plane)
1578  * @param dst2 target buffer for block data at block position (V plane)
1579  * @param dststride stride of the dst1 and dst2 buffers
1580  * @param ref reference picture buffer at origin (0, 0)
1581  * @param mv motion vector (relative to block position) to get pixel data from
1582  * @param x_off horizontal position of block from origin (0, 0)
1583  * @param y_off vertical position of block from origin (0, 0)
1584  * @param block_w width of block
1585  * @param block_h height of block
1586  * @param chroma_weight weighting factor applied to the chroma prediction
1587  * @param chroma_offset additive offset applied to the chroma prediction value
1588  */
1589 
1590 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1591  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1592  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1593 {
1594  HEVCLocalContext *lc = s->HEVClc;
1595  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1596  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1597  const Mv *mv = &current_mv->mv[reflist];
1598  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1599  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1600  int idx = ff_hevc_pel_weight[block_w];
1601  int hshift = s->ps.sps->hshift[1];
1602  int vshift = s->ps.sps->vshift[1];
1603  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1604  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1605  intptr_t _mx = mx << (1 - hshift);
1606  intptr_t _my = my << (1 - vshift);
1607 
1608  x_off += mv->x >> (2 + hshift);
1609  y_off += mv->y >> (2 + vshift);
1610  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1611 
1612  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1613  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1614  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1615  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1616  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1617  int buf_offset0 = EPEL_EXTRA_BEFORE *
1618  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1619  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1620  edge_emu_stride, srcstride,
1621  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1622  x_off - EPEL_EXTRA_BEFORE,
1623  y_off - EPEL_EXTRA_BEFORE,
1624  pic_width, pic_height);
1625 
1626  src0 = lc->edge_emu_buffer + buf_offset0;
1627  srcstride = edge_emu_stride;
1628  }
1629  if (!weight_flag)
1630  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1631  block_h, _mx, _my, block_w);
1632  else
1633  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1634  block_h, s->sh.chroma_log2_weight_denom,
1635  chroma_weight, chroma_offset, _mx, _my, block_w);
1636 }
1637 
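/*
 * Chroma motion vectors are stored with 2 + hshift fractional bits, i.e.
 * 1/8 chroma sample for 4:2:0, so the integer part is mv >> 3 and the
 * fraction is mv & 7.  For example, with 4:2:0 and mv->x == 13, x_off is
 * advanced by 13 >> 3 == 1 chroma sample and the fractional phase passed to
 * the EPEL filter is 13 & 7 == 5.  For 4:2:2 and 4:4:4 the extra
 * << (1 - hshift) / << (1 - vshift) rescales the fraction to the 1/8-pel
 * grid expected by the DSP functions.
 */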
1638 /**
1639  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1640  *
1641  * @param s HEVC decoding context
1642  * @param dst0 target buffer for block data at block position
1643  * @param dststride stride of the dst buffer
1644  * @param ref0 reference picture0 buffer at origin (0, 0)
1645  * @param mv0 list 0 motion vector (relative to block position), taken from current_mv
1646  * @param x_off horizontal position of block from origin (0, 0)
1647  * @param y_off vertical position of block from origin (0, 0)
1648  * @param block_w width of block
1649  * @param block_h height of block
1650  * @param ref1 reference picture1 buffer at origin (0, 0)
1651  * @param mv1 list 1 motion vector (relative to block position), taken from current_mv
1652  * @param current_mv current motion vector structure
1653  * @param cidx chroma component index (0 = Cb, 1 = Cr)
1654  */
1655 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1656  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1657 {
1658  HEVCLocalContext *lc = s->HEVClc;
1659  uint8_t *src1 = ref0->data[cidx+1];
1660  uint8_t *src2 = ref1->data[cidx+1];
1661  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1662  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1663  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1664  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1665  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1666  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1667  Mv *mv0 = &current_mv->mv[0];
1668  Mv *mv1 = &current_mv->mv[1];
1669  int hshift = s->ps.sps->hshift[1];
1670  int vshift = s->ps.sps->vshift[1];
1671 
1672  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1673  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1674  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1675  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1676  intptr_t _mx0 = mx0 << (1 - hshift);
1677  intptr_t _my0 = my0 << (1 - vshift);
1678  intptr_t _mx1 = mx1 << (1 - hshift);
1679  intptr_t _my1 = my1 << (1 - vshift);
1680 
1681  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1682  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1683  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1684  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1685  int idx = ff_hevc_pel_weight[block_w];
1686  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1687  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1688 
1689  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1690  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1691  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1692  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1693  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1694  int buf_offset1 = EPEL_EXTRA_BEFORE *
1695  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1696 
1697  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1698  edge_emu_stride, src1stride,
1699  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1700  x_off0 - EPEL_EXTRA_BEFORE,
1701  y_off0 - EPEL_EXTRA_BEFORE,
1702  pic_width, pic_height);
1703 
1704  src1 = lc->edge_emu_buffer + buf_offset1;
1705  src1stride = edge_emu_stride;
1706  }
1707 
1708  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1709  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1710  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1711  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1712  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1713  int buf_offset1 = EPEL_EXTRA_BEFORE *
1714  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1715 
1716  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1717  edge_emu_stride, src2stride,
1718  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1719  x_off1 - EPEL_EXTRA_BEFORE,
1720  y_off1 - EPEL_EXTRA_BEFORE,
1721  pic_width, pic_height);
1722 
1723  src2 = lc->edge_emu_buffer2 + buf_offset1;
1724  src2stride = edge_emu_stride;
1725  }
1726 
1727  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1728  block_h, _mx0, _my0, block_w);
1729  if (!weight_flag)
1730  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1731  src2, src2stride, lc->tmp,
1732  block_h, _mx1, _my1, block_w);
1733  else
1734  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1735  src2, src2stride, lc->tmp,
1736  block_h,
1737  s->sh.chroma_log2_weight_denom,
1738  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1739  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1740  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1741  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1742  _mx1, _my1, block_w);
1743 }
1744 
1745 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1746  const Mv *mv, int y0, int height)
1747 {
1748  if (s->threads_type == FF_THREAD_FRAME ) {
1749  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1750 
1751  ff_thread_await_progress(&ref->tf, y, 0);
1752  }
1753 }
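/*
 * With frame threading, the current picture may reference a frame that is
 * still being decoded by another thread.  The wait above targets the bottom
 * luma row that the motion-compensated block can touch: (mv->y >> 2)
 * converts the quarter-pel vector to integer rows, and the extra rows beyond
 * y0 + height leave margin for the interpolation filter taps and the in-loop
 * filters that still have to run on the reference.
 */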
1754 
1755 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1756  int nPbH, int log2_cb_size, int part_idx,
1757  int merge_idx, MvField *mv)
1758 {
1759  HEVCLocalContext *lc = s->HEVClc;
1760  enum InterPredIdc inter_pred_idc = PRED_L0;
1761  int mvp_flag;
1762 
1763  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1764  mv->pred_flag = 0;
1765  if (s->sh.slice_type == HEVC_SLICE_B)
1766  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1767 
1768  if (inter_pred_idc != PRED_L1) {
1769  if (s->sh.nb_refs[L0])
1770  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1771 
1772  mv->pred_flag = PF_L0;
1773  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1774  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1775  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1776  part_idx, merge_idx, mv, mvp_flag, 0);
1777  mv->mv[0].x += lc->pu.mvd.x;
1778  mv->mv[0].y += lc->pu.mvd.y;
1779  }
1780 
1781  if (inter_pred_idc != PRED_L0) {
1782  if (s->sh.nb_refs[L1])
1783  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1784 
1785  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1786  AV_ZERO32(&lc->pu.mvd);
1787  } else {
1788  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1789  }
1790 
1791  mv->pred_flag += PF_L1;
1792  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1793  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1794  part_idx, merge_idx, mv, mvp_flag, 1);
1795  mv->mv[1].x += lc->pu.mvd.x;
1796  mv->mv[1].y += lc->pu.mvd.y;
1797  }
1798 }
1799 
1800 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1801  int nPbW, int nPbH,
1802  int log2_cb_size, int partIdx, int idx)
1803 {
1804 #define POS(c_idx, x, y) \
1805  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1806  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1807  HEVCLocalContext *lc = s->HEVClc;
1808  int merge_idx = 0;
1809  struct MvField current_mv = {{{ 0 }}};
1810 
1811  int min_pu_width = s->ps.sps->min_pu_width;
1812 
1813  MvField *tab_mvf = s->ref->tab_mvf;
1814  RefPicList *refPicList = s->ref->refPicList;
1815  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1816  uint8_t *dst0 = POS(0, x0, y0);
1817  uint8_t *dst1 = POS(1, x0, y0);
1818  uint8_t *dst2 = POS(2, x0, y0);
1819  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1820  int min_cb_width = s->ps.sps->min_cb_width;
1821  int x_cb = x0 >> log2_min_cb_size;
1822  int y_cb = y0 >> log2_min_cb_size;
1823  int x_pu, y_pu;
1824  int i, j;
1825 
1826  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1827 
1828  if (!skip_flag)
1829  lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1830 
1831  if (skip_flag || lc->pu.merge_flag) {
1832  if (s->sh.max_num_merge_cand > 1)
1833  merge_idx = ff_hevc_merge_idx_decode(s);
1834  else
1835  merge_idx = 0;
1836 
1837  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1838  partIdx, merge_idx, &current_mv);
1839  } else {
1840  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1841  partIdx, merge_idx, &current_mv);
1842  }
1843 
1844  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1845  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1846 
1847  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1848  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1849  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1850 
1851  if (current_mv.pred_flag & PF_L0) {
1852  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1853  if (!ref0)
1854  return;
1855  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1856  }
1857  if (current_mv.pred_flag & PF_L1) {
1858  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1859  if (!ref1)
1860  return;
1861  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1862  }
1863 
1864  if (current_mv.pred_flag == PF_L0) {
1865  int x0_c = x0 >> s->ps.sps->hshift[1];
1866  int y0_c = y0 >> s->ps.sps->vshift[1];
1867  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1868  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1869 
1870  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1871  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1872  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1873  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1874 
1875  if (s->ps.sps->chroma_format_idc) {
1876  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1877  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1878  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1879  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1880  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1881  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1882  }
1883  } else if (current_mv.pred_flag == PF_L1) {
1884  int x0_c = x0 >> s->ps.sps->hshift[1];
1885  int y0_c = y0 >> s->ps.sps->vshift[1];
1886  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1887  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1888 
1889  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1890  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1891  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1892  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1893 
1894  if (s->ps.sps->chroma_format_idc) {
1895  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1896  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1897  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1898 
1899  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1900  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1901  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1902  }
1903  } else if (current_mv.pred_flag == PF_BI) {
1904  int x0_c = x0 >> s->ps.sps->hshift[1];
1905  int y0_c = y0 >> s->ps.sps->vshift[1];
1906  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1907  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1908 
1909  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1910  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1911  ref1->frame, &current_mv.mv[1], &current_mv);
1912 
1913  if (s->ps.sps->chroma_format_idc) {
1914  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1915  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1916 
1917  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1918  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1919  }
1920  }
1921 }
1922 
1923 /**
1924  * 8.4.1
1925  */
1926 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1927  int prev_intra_luma_pred_flag)
1928 {
1929  HEVCLocalContext *lc = s->HEVClc;
1930  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1931  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1932  int min_pu_width = s->ps.sps->min_pu_width;
1933  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1934  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1935  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1936 
1937  int cand_up = (lc->ctb_up_flag || y0b) ?
1938  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1939  int cand_left = (lc->ctb_left_flag || x0b) ?
1940  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1941 
1942  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1943 
1944  MvField *tab_mvf = s->ref->tab_mvf;
1945  int intra_pred_mode;
1946  int candidate[3];
1947  int i, j;
1948 
1949  // intra_pred_mode prediction does not cross vertical CTB boundaries
1950  if ((y0 - 1) < y_ctb)
1951  cand_up = INTRA_DC;
1952 
1953  if (cand_left == cand_up) {
1954  if (cand_left < 2) {
1955  candidate[0] = INTRA_PLANAR;
1956  candidate[1] = INTRA_DC;
1957  candidate[2] = INTRA_ANGULAR_26;
1958  } else {
1959  candidate[0] = cand_left;
1960  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1961  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1962  }
1963  } else {
1964  candidate[0] = cand_left;
1965  candidate[1] = cand_up;
1966  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
1967  candidate[2] = INTRA_PLANAR;
1968  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
1969  candidate[2] = INTRA_DC;
1970  } else {
1971  candidate[2] = INTRA_ANGULAR_26;
1972  }
1973  }
1974 
1975  if (prev_intra_luma_pred_flag) {
1976  intra_pred_mode = candidate[lc->pu.mpm_idx];
1977  } else {
1978  if (candidate[0] > candidate[1])
1979  FFSWAP(uint8_t, candidate[0], candidate[1]);
1980  if (candidate[0] > candidate[2])
1981  FFSWAP(uint8_t, candidate[0], candidate[2]);
1982  if (candidate[1] > candidate[2])
1983  FFSWAP(uint8_t, candidate[1], candidate[2]);
1984 
1985  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
1986  for (i = 0; i < 3; i++)
1987  if (intra_pred_mode >= candidate[i])
1988  intra_pred_mode++;
1989  }
1990 
1991  /* write the intra prediction units into the mv array */
1992  if (!size_in_pus)
1993  size_in_pus = 1;
1994  for (i = 0; i < size_in_pus; i++) {
1995  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
1996  intra_pred_mode, size_in_pus);
1997 
1998  for (j = 0; j < size_in_pus; j++) {
1999  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2000  }
2001  }
2002 
2003  return intra_pred_mode;
2004 }
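/*
 * Worked example: if both neighbours use the same angular mode, say
 * cand_left == cand_up == 14, the three most probable modes become
 * { 14, 2 + ((14 - 2 - 1 + 32) & 31), 2 + ((14 - 2 + 1) & 31) } = { 14, 13, 15 },
 * i.e. the mode itself plus its two angular neighbours.  When
 * prev_intra_luma_pred_flag is 0, rem_intra_luma_pred_mode indexes the 32
 * remaining modes, which is why it is bumped past each sorted candidate it
 * is greater than or equal to.
 */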
2005 
2006 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2007  int log2_cb_size, int ct_depth)
2008 {
2009  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2010  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2011  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2012  int y;
2013 
2014  for (y = 0; y < length; y++)
2015  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2016  ct_depth, length);
2017 }
2018 
2019 static const uint8_t tab_mode_idx[] = {
2020  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2021  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2022 
2023 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2024  int log2_cb_size)
2025 {
2026  HEVCLocalContext *lc = s->HEVClc;
2027  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2028  uint8_t prev_intra_luma_pred_flag[4];
2029  int split = lc->cu.part_mode == PART_NxN;
2030  int pb_size = (1 << log2_cb_size) >> split;
2031  int side = split + 1;
2032  int chroma_mode;
2033  int i, j;
2034 
2035  for (i = 0; i < side; i++)
2036  for (j = 0; j < side; j++)
2037  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2038 
2039  for (i = 0; i < side; i++) {
2040  for (j = 0; j < side; j++) {
2041  if (prev_intra_luma_pred_flag[2 * i + j])
2042  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2043  else
2044  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2045 
2046  lc->pu.intra_pred_mode[2 * i + j] =
2047  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2048  prev_intra_luma_pred_flag[2 * i + j]);
2049  }
2050  }
2051 
2052  if (s->ps.sps->chroma_format_idc == 3) {
2053  for (i = 0; i < side; i++) {
2054  for (j = 0; j < side; j++) {
2055  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2056  if (chroma_mode != 4) {
2057  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2058  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2059  else
2060  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2061  } else {
2062  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2063  }
2064  }
2065  }
2066  } else if (s->ps.sps->chroma_format_idc == 2) {
2067  int mode_idx;
2068  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2069  if (chroma_mode != 4) {
2070  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2071  mode_idx = 34;
2072  else
2073  mode_idx = intra_chroma_table[chroma_mode];
2074  } else {
2075  mode_idx = lc->pu.intra_pred_mode[0];
2076  }
2077  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2078  } else if (s->ps.sps->chroma_format_idc != 0) {
2079  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2080  if (chroma_mode != 4) {
2081  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2082  lc->pu.intra_pred_mode_c[0] = 34;
2083  else
2084  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2085  } else {
2086  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2087  }
2088  }
2089 }
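/*
 * intra_chroma_table maps the decoded intra_chroma_pred_mode symbols 0..3 to
 * { planar (0), vertical (26), horizontal (10), DC (1) }; symbol 4 means
 * "derived", i.e. reuse the luma mode.  When the table entry collides with
 * the luma mode, angular mode 34 is substituted instead, and for 4:2:2 the
 * result is additionally remapped through tab_mode_idx to account for the
 * non-square chroma sample grid.
 */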
2090 
2091 static void intra_prediction_unit_default_value(HEVCContext *s,
2092  int x0, int y0,
2093  int log2_cb_size)
2094 {
2095  HEVCLocalContext *lc = s->HEVClc;
2096  int pb_size = 1 << log2_cb_size;
2097  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2098  int min_pu_width = s->ps.sps->min_pu_width;
2099  MvField *tab_mvf = s->ref->tab_mvf;
2100  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2101  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2102  int j, k;
2103 
2104  if (size_in_pus == 0)
2105  size_in_pus = 1;
2106  for (j = 0; j < size_in_pus; j++)
2107  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2108  if (lc->cu.pred_mode == MODE_INTRA)
2109  for (j = 0; j < size_in_pus; j++)
2110  for (k = 0; k < size_in_pus; k++)
2111  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2112 }
2113 
2114 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2115 {
2116  int cb_size = 1 << log2_cb_size;
2117  HEVCLocalContext *lc = s->HEVClc;
2118  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2119  int length = cb_size >> log2_min_cb_size;
2120  int min_cb_width = s->ps.sps->min_cb_width;
2121  int x_cb = x0 >> log2_min_cb_size;
2122  int y_cb = y0 >> log2_min_cb_size;
2123  int idx = log2_cb_size - 2;
2124  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2125  int x, y, ret;
2126 
2127  lc->cu.x = x0;
2128  lc->cu.y = y0;
2129  lc->cu.pred_mode = MODE_INTRA;
2130  lc->cu.part_mode = PART_2Nx2N;
2131  lc->cu.intra_split_flag = 0;
2132 
2133  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2134  for (x = 0; x < 4; x++)
2135  lc->pu.intra_pred_mode[x] = 1;
2136  if (s->ps.pps->transquant_bypass_enable_flag) {
2137  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2138  if (lc->cu.cu_transquant_bypass_flag)
2139  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2140  } else
2141  lc->cu.cu_transquant_bypass_flag = 0;
2142 
2143  if (s->sh.slice_type != HEVC_SLICE_I) {
2144  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2145 
2146  x = y_cb * min_cb_width + x_cb;
2147  for (y = 0; y < length; y++) {
2148  memset(&s->skip_flag[x], skip_flag, length);
2149  x += min_cb_width;
2150  }
2151  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2152  } else {
2153  x = y_cb * min_cb_width + x_cb;
2154  for (y = 0; y < length; y++) {
2155  memset(&s->skip_flag[x], 0, length);
2156  x += min_cb_width;
2157  }
2158  }
2159 
2160  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2161  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2162  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2163 
2164  if (!s->sh.disable_deblocking_filter_flag)
2165  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2166  } else {
2167  int pcm_flag = 0;
2168 
2169  if (s->sh.slice_type != HEVC_SLICE_I)
2170  lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2171  if (lc->cu.pred_mode != MODE_INTRA ||
2172  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2173  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2174  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2175  lc->cu.pred_mode == MODE_INTRA;
2176  }
2177 
2178  if (lc->cu.pred_mode == MODE_INTRA) {
2179  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2180  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2181  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2182  pcm_flag = ff_hevc_pcm_flag_decode(s);
2183  }
2184  if (pcm_flag) {
2185  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2186  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2187  if (s->ps.sps->pcm.loop_filter_disable_flag)
2188  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2189 
2190  if (ret < 0)
2191  return ret;
2192  } else {
2193  intra_prediction_unit(s, x0, y0, log2_cb_size);
2194  }
2195  } else {
2196  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2197  switch (lc->cu.part_mode) {
2198  case PART_2Nx2N:
2199  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2200  break;
2201  case PART_2NxN:
2202  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2203  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2204  break;
2205  case PART_Nx2N:
2206  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2207  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2208  break;
2209  case PART_2NxnU:
2210  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2211  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2212  break;
2213  case PART_2NxnD:
2214  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2215  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2216  break;
2217  case PART_nLx2N:
2218  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2219  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2220  break;
2221  case PART_nRx2N:
2222  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2223  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2224  break;
2225  case PART_NxN:
2226  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2227  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2228  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2229  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2230  break;
2231  }
2232  }
2233 
2234  if (!pcm_flag) {
2235  int rqt_root_cbf = 1;
2236 
2237  if (lc->cu.pred_mode != MODE_INTRA &&
2238  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2239  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2240  }
2241  if (rqt_root_cbf) {
2242  const static int cbf[2] = { 0 };
2243  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2244  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2245  s->ps.sps->max_transform_hierarchy_depth_inter;
2246  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2247  log2_cb_size,
2248  log2_cb_size, 0, 0, cbf, cbf);
2249  if (ret < 0)
2250  return ret;
2251  } else {
2252  if (!s->sh.disable_deblocking_filter_flag)
2253  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2254  }
2255  }
2256  }
2257 
2258  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2259  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2260 
2261  x = y_cb * min_cb_width + x_cb;
2262  for (y = 0; y < length; y++) {
2263  memset(&s->qp_y_tab[x], lc->qp_y, length);
2264  x += min_cb_width;
2265  }
2266 
2267  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2268  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2269  lc->qPy_pred = lc->qp_y;
2270  }
2271 
2272  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2273 
2274  return 0;
2275 }
2276 
2277 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2278  int log2_cb_size, int cb_depth)
2279 {
2280  HEVCLocalContext *lc = s->HEVClc;
2281  const int cb_size = 1 << log2_cb_size;
2282  int ret;
2283  int split_cu;
2284 
2285  lc->ct_depth = cb_depth;
2286  if (x0 + cb_size <= s->ps.sps->width &&
2287  y0 + cb_size <= s->ps.sps->height &&
2288  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2289  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2290  } else {
2291  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2292  }
2293  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2294  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2295  lc->tu.is_cu_qp_delta_coded = 0;
2296  lc->tu.cu_qp_delta = 0;
2297  }
2298 
2299  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2300  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2301  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2302  }
2303 
2304  if (split_cu) {
2305  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2306  const int cb_size_split = cb_size >> 1;
2307  const int x1 = x0 + cb_size_split;
2308  const int y1 = y0 + cb_size_split;
2309 
2310  int more_data = 0;
2311 
2312  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2313  if (more_data < 0)
2314  return more_data;
2315 
2316  if (more_data && x1 < s->ps.sps->width) {
2317  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2318  if (more_data < 0)
2319  return more_data;
2320  }
2321  if (more_data && y1 < s->ps.sps->height) {
2322  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2323  if (more_data < 0)
2324  return more_data;
2325  }
2326  if (more_data && x1 < s->ps.sps->width &&
2327  y1 < s->ps.sps->height) {
2328  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2329  if (more_data < 0)
2330  return more_data;
2331  }
2332 
2333  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2334  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2335  lc->qPy_pred = lc->qp_y;
2336 
2337  if (more_data)
2338  return ((x1 + cb_size_split) < s->ps.sps->width ||
2339  (y1 + cb_size_split) < s->ps.sps->height);
2340  else
2341  return 0;
2342  } else {
2343  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2344  if (ret < 0)
2345  return ret;
2346  if ((!((x0 + cb_size) %
2347  (1 << (s->ps.sps->log2_ctb_size))) ||
2348  (x0 + cb_size >= s->ps.sps->width)) &&
2349  (!((y0 + cb_size) %
2350  (1 << (s->ps.sps->log2_ctb_size))) ||
2351  (y0 + cb_size >= s->ps.sps->height))) {
2352  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2353  return !end_of_slice_flag;
2354  } else {
2355  return 1;
2356  }
2357  }
2358 
2359  return 0;
2360 }
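/*
 * The return value of hls_coding_quadtree() doubles as the "more data" flag
 * used by the CTB decoding loops: a negative value is an error, 0 means the
 * end of the slice segment was reached (end_of_slice_flag read as 1, or a
 * child quadrant returned 0), and a positive value means decoding continues
 * with the next coding unit.
 */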
2361 
2362 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2363  int ctb_addr_ts)
2364 {
2365  HEVCLocalContext *lc = s->HEVClc;
2366  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2367  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2368  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2369 
2370  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2371 
2372  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2373  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2374  lc->first_qp_group = 1;
2375  lc->end_of_tiles_x = s->ps.sps->width;
2376  } else if (s->ps.pps->tiles_enabled_flag) {
2377  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2378  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2379  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2380  lc->first_qp_group = 1;
2381  }
2382  } else {
2383  lc->end_of_tiles_x = s->ps.sps->width;
2384  }
2385 
2386  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2387 
2388  lc->boundary_flags = 0;
2389  if (s->ps.pps->tiles_enabled_flag) {
2390  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2391  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2392  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2393  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2394  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2395  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2396  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2397  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2398  } else {
2399  if (ctb_addr_in_slice <= 0)
2400  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2401  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2402  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2403  }
2404 
2405  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2406  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2407  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2408  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2409 }
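/*
 * ctb_left_flag, ctb_up_flag and the related flags computed here record
 * whether the neighbouring CTBs are usable for prediction: the neighbour
 * must exist in the picture, belong to the same slice segment (the
 * ctb_addr_in_slice checks) and not be separated by a tile boundary.  These
 * flags are later combined with the position inside the CTB by
 * ff_hevc_set_neighbour_available() to decide whether left/up samples and
 * motion data may be used.
 */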
2410 
2411 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2412 {
2413  HEVCContext *s = avctxt->priv_data;
2414  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2415  int more_data = 1;
2416  int x_ctb = 0;
2417  int y_ctb = 0;
2418  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2419  int ret;
2420 
2421  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2422  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2423  return AVERROR_INVALIDDATA;
2424  }
2425 
2426  if (s->sh.dependent_slice_segment_flag) {
2427  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2428  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2429  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2430  return AVERROR_INVALIDDATA;
2431  }
2432  }
2433 
2434  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2435  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2436 
2437  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2438  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2439  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2440 
2441  ret = ff_hevc_cabac_init(s, ctb_addr_ts);
2442  if (ret < 0) {
2443  s->tab_slice_address[ctb_addr_rs] = -1;
2444  return ret;
2445  }
2446 
2447  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2448 
2449  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2450  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2451  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2452 
2453  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2454  if (more_data < 0) {
2455  s->tab_slice_address[ctb_addr_rs] = -1;
2456  return more_data;
2457  }
2458 
2459 
2460  ctb_addr_ts++;
2461  ff_hevc_save_states(s, ctb_addr_ts);
2462  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2463  }
2464 
2465  if (x_ctb + ctb_size >= s->ps.sps->width &&
2466  y_ctb + ctb_size >= s->ps.sps->height)
2467  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2468 
2469  return ctb_addr_ts;
2470 }
2471 
2472 static int hls_slice_data(HEVCContext *s)
2473 {
2474  int arg[2];
2475  int ret[2];
2476 
2477  arg[0] = 0;
2478  arg[1] = 1;
2479 
2480  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2481  return ret[0];
2482 }
2483 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2484 {
2485  HEVCContext *s1 = avctxt->priv_data, *s;
2486  HEVCLocalContext *lc;
2487  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2488  int more_data = 1;
2489  int *ctb_row_p = input_ctb_row;
2490  int ctb_row = ctb_row_p[job];
2491  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2492  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2493  int thread = ctb_row % s1->threads_number;
2494  int ret;
2495 
2496  s = s1->sList[self_id];
2497  lc = s->HEVClc;
2498 
2499  if(ctb_row) {
2500  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2501  if (ret < 0)
2502  goto error;
2503  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2504  }
2505 
2506  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2507  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2508  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2509 
2510  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2511 
2512  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2513 
2514  if (atomic_load(&s1->wpp_err)) {
2515  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2516  return 0;
2517  }
2518 
2519  ret = ff_hevc_cabac_init(s, ctb_addr_ts);
2520  if (ret < 0)
2521  goto error;
2522  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2523  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2524 
2525  if (more_data < 0) {
2526  ret = more_data;
2527  goto error;
2528  }
2529 
2530  ctb_addr_ts++;
2531 
2532  ff_hevc_save_states(s, ctb_addr_ts);
2533  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2534  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2535 
2536  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2537  atomic_store(&s1->wpp_err, 1);
2538  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2539  return 0;
2540  }
2541 
2542  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2543  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2544  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2545  return ctb_addr_ts;
2546  }
2547  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2548  x_ctb+=ctb_size;
2549 
2550  if(x_ctb >= s->ps.sps->width) {
2551  break;
2552  }
2553  }
2554  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2555 
2556  return 0;
2557 error:
2558  s->tab_slice_address[ctb_addr_rs] = -1;
2559  atomic_store(&s1->wpp_err, 1);
2560  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2561  return ret;
2562 }
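/*
 * With wavefront parallel processing each CTB row is decoded as a separate
 * job.  Before decoding a CTB, the row waits (ff_thread_await_progress2)
 * until the row above is far enough ahead, so that the CABAC state inherited
 * via ff_hevc_save_states() and the up/up-right neighbours are already
 * available.  Any error sets wpp_err, which makes the remaining rows report
 * progress and bail out early instead of decoding garbage.
 */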
2563 
2564 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2565 {
2566  const uint8_t *data = nal->data;
2567  int length = nal->size;
2568  HEVCLocalContext *lc = s->HEVClc;
2569  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2570  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2571  int64_t offset;
2572  int64_t startheader, cmpt = 0;
2573  int i, j, res = 0;
2574 
2575  if (!ret || !arg) {
2576  av_free(ret);
2577  av_free(arg);
2578  return AVERROR(ENOMEM);
2579  }
2580 
2581  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2582  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2583  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2584  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2585  );
2586  res = AVERROR_INVALIDDATA;
2587  goto error;
2588  }
2589 
2590  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2591 
2592  if (!s->sList[1]) {
2593  for (i = 1; i < s->threads_number; i++) {
2594  s->sList[i] = av_malloc(sizeof(HEVCContext));
2595  memcpy(s->sList[i], s, sizeof(HEVCContext));
2596  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2597  s->sList[i]->HEVClc = s->HEVClcList[i];
2598  }
2599  }
2600 
2601  offset = (lc->gb.index >> 3);
2602 
2603  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2604  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2605  startheader--;
2606  cmpt++;
2607  }
2608  }
2609 
2610  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2611  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2612  for (j = 0, cmpt = 0, startheader = offset
2613  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2614  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2615  startheader--;
2616  cmpt++;
2617  }
2618  }
2619  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2620  s->sh.offset[i - 1] = offset;
2621 
2622  }
2623  if (s->sh.num_entry_point_offsets != 0) {
2624  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2625  if (length < offset) {
2626  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2627  res = AVERROR_INVALIDDATA;
2628  goto error;
2629  }
2630  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2631  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2632 
2633  }
2634  s->data = data;
2635 
2636  for (i = 1; i < s->threads_number; i++) {
2637  s->sList[i]->HEVClc->first_qp_group = 1;
2638  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2639  memcpy(s->sList[i], s, sizeof(HEVCContext));
2640  s->sList[i]->HEVClc = s->HEVClcList[i];
2641  }
2642 
2643  atomic_store(&s->wpp_err, 0);
2644  ff_reset_entries(s->avctx);
2645 
2646  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2647  arg[i] = i;
2648  ret[i] = 0;
2649  }
2650 
2651  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2652  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2653 
2654  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2655  res += ret[i];
2656 error:
2657  av_free(ret);
2658  av_free(arg);
2659  return res;
2660 }
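/*
 * The entry point offsets signalled in the slice header count bytes of the
 * escaped NAL unit (emulation-prevention bytes included), while s->data
 * points at the unescaped payload produced by the NAL parser.  The loops
 * above therefore count how many removed emulation-prevention bytes
 * (nal->skipped_bytes_pos[]) fall inside each range (cmpt) and subtract that
 * from the signalled offsets when building the per-row (offset, size) pairs
 * for the WPP substreams.
 */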
2661 
2662 static int set_side_data(HEVCContext *s)
2663 {
2664  AVFrame *out = s->ref->frame;
2665 
2666  if (s->sei.frame_packing.present &&
2667  s->sei.frame_packing.arrangement_type >= 3 &&
2668  s->sei.frame_packing.arrangement_type <= 5 &&
2669  s->sei.frame_packing.content_interpretation_type > 0 &&
2670  s->sei.frame_packing.content_interpretation_type < 3) {
2671  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2672  if (!stereo)
2673  return AVERROR(ENOMEM);
2674 
2675  switch (s->sei.frame_packing.arrangement_type) {
2676  case 3:
2677  if (s->sei.frame_packing.quincunx_subsampling)
2678  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2679  else
2680  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2681  break;
2682  case 4:
2683  stereo->type = AV_STEREO3D_TOPBOTTOM;
2684  break;
2685  case 5:
2686  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2687  break;
2688  }
2689 
2690  if (s->sei.frame_packing.content_interpretation_type == 2)
2691  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2692 
2693  if (s->sei.frame_packing.arrangement_type == 5) {
2694  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2695  stereo->view = AV_STEREO3D_VIEW_LEFT;
2696  else
2697  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2698  }
2699  }
2700 
2701  if (s->sei.display_orientation.present &&
2702  (s->sei.display_orientation.anticlockwise_rotation ||
2703  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2704  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2705  AVFrameSideData *rotation = av_frame_new_side_data(out,
2706  AV_FRAME_DATA_DISPLAYMATRIX,
2707  sizeof(int32_t) * 9);
2708  if (!rotation)
2709  return AVERROR(ENOMEM);
2710 
2711  av_display_rotation_set((int32_t *)rotation->data, angle);
2712  av_display_matrix_flip((int32_t *)rotation->data,
2713  s->sei.display_orientation.hflip,
2714  s->sei.display_orientation.vflip);
2715  }
2716 
2717  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2718  // so the side data persists for the entire coded video sequence.
2719  if (s->sei.mastering_display.present > 0 &&
2720  IS_IRAP(s) && s->no_rasl_output_flag) {
2721  s->sei.mastering_display.present--;
2722  }
2723  if (s->sei.mastering_display.present) {
2724  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2725  const int mapping[3] = {2, 0, 1};
2726  const int chroma_den = 50000;
2727  const int luma_den = 10000;
2728  int i;
2729  AVMasteringDisplayMetadata *metadata =
2730  av_mastering_display_metadata_create_side_data(out);
2731  if (!metadata)
2732  return AVERROR(ENOMEM);
2733 
2734  for (i = 0; i < 3; i++) {
2735  const int j = mapping[i];
2736  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2737  metadata->display_primaries[i][0].den = chroma_den;
2738  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2739  metadata->display_primaries[i][1].den = chroma_den;
2740  }
2741  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2742  metadata->white_point[0].den = chroma_den;
2743  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2744  metadata->white_point[1].den = chroma_den;
2745 
2746  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2747  metadata->max_luminance.den = luma_den;
2748  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2749  metadata->min_luminance.den = luma_den;
2750  metadata->has_luminance = 1;
2751  metadata->has_primaries = 1;
2752 
2753  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2754  av_log(s->avctx, AV_LOG_DEBUG,
2755  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2756  av_q2d(metadata->display_primaries[0][0]),
2757  av_q2d(metadata->display_primaries[0][1]),
2758  av_q2d(metadata->display_primaries[1][0]),
2759  av_q2d(metadata->display_primaries[1][1]),
2760  av_q2d(metadata->display_primaries[2][0]),
2761  av_q2d(metadata->display_primaries[2][1]),
2762  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2763  av_log(s->avctx, AV_LOG_DEBUG,
2764  "min_luminance=%f, max_luminance=%f\n",
2765  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2766  }
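/*
 * The mastering display colour volume SEI carries display primaries in
 * increments of 0.00002 (hence the 50000 denominator) and luminance in
 * increments of 0.0001 cd/m2 (hence 10000); the mapping[] array above only
 * reorders the G,B,R transmission order into the R,G,B order used by
 * AVMasteringDisplayMetadata.
 */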
2767  // Decrement the content light level flag when an IRAP frame has no_rasl_output_flag=1
2768  // so the side data persists for the entire coded video sequence.
2769  if (s->sei.content_light.present > 0 &&
2770  IS_IRAP(s) && s->no_rasl_output_flag) {
2771  s->sei.content_light.present--;
2772  }
2773  if (s->sei.content_light.present) {
2774  AVContentLightMetadata *metadata =
2775  av_content_light_metadata_create_side_data(out);
2776  if (!metadata)
2777  return AVERROR(ENOMEM);
2778  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2779  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2780 
2781  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2782  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2783  metadata->MaxCLL, metadata->MaxFALL);
2784  }
2785 
2786  if (s->sei.a53_caption.a53_caption) {
2787  AVFrameSideData *sd = av_frame_new_side_data(out,
2788  AV_FRAME_DATA_A53_CC,
2789  s->sei.a53_caption.a53_caption_size);
2790  if (sd)
2791  memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size);
2792  av_freep(&s->sei.a53_caption.a53_caption);
2793  s->sei.a53_caption.a53_caption_size = 0;
2794  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2795  }
2796 
2797  return 0;
2798 }
2799 
2800 static int hevc_frame_start(HEVCContext *s)
2801 {
2802  HEVCLocalContext *lc = s->HEVClc;
2803  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2804  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2805  int ret;
2806 
2807  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2808  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2809  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2810  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2811  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2812 
2813  s->is_decoded = 0;
2814  s->first_nal_type = s->nal_unit_type;
2815 
2816  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2817 
2818  if (s->ps.pps->tiles_enabled_flag)
2819  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2820 
2821  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2822  if (ret < 0)
2823  goto fail;
2824 
2825  ret = ff_hevc_frame_rps(s);
2826  if (ret < 0) {
2827  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2828  goto fail;
2829  }
2830 
2831  s->ref->frame->key_frame = IS_IRAP(s);
2832 
2833  ret = set_side_data(s);
2834  if (ret < 0)
2835  goto fail;
2836 
2837  s->frame->pict_type = 3 - s->sh.slice_type;
2838 
2839  if (!IS_IRAP(s))
2840  ff_hevc_bump_frame(s);
2841 
2842  av_frame_unref(s->output_frame);
2843  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2844  if (ret < 0)
2845  goto fail;
2846 
2847  if (!s->avctx->hwaccel)
2848  ff_thread_finish_setup(s->avctx);
2849 
2850  return 0;
2851 
2852 fail:
2853  if (s->ref)
2854  ff_hevc_unref_frame(s, s->ref, ~0);
2855  s->ref = NULL;
2856  return ret;
2857 }
2858 
2859 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2860 {
2861  HEVCLocalContext *lc = s->HEVClc;
2862  GetBitContext *gb = &lc->gb;
2863  int ctb_addr_ts, ret;
2864 
2865  *gb = nal->gb;
2866  s->nal_unit_type = nal->type;
2867  s->temporal_id = nal->temporal_id;
2868 
2869  switch (s->nal_unit_type) {
2870  case HEVC_NAL_VPS:
2871  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2872  ret = s->avctx->hwaccel->decode_params(s->avctx,
2873  nal->type,
2874  nal->raw_data,
2875  nal->raw_size);
2876  if (ret < 0)
2877  goto fail;
2878  }
2879  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2880  if (ret < 0)
2881  goto fail;
2882  break;
2883  case HEVC_NAL_SPS:
2884  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2885  ret = s->avctx->hwaccel->decode_params(s->avctx,
2886  nal->type,
2887  nal->raw_data,
2888  nal->raw_size);
2889  if (ret < 0)
2890  goto fail;
2891  }
2892  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2893  s->apply_defdispwin);
2894  if (ret < 0)
2895  goto fail;
2896  break;
2897  case HEVC_NAL_PPS:
2898  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2899  ret = s->avctx->hwaccel->decode_params(s->avctx,
2900  nal->type,
2901  nal->raw_data,
2902  nal->raw_size);
2903  if (ret < 0)
2904  goto fail;
2905  }
2906  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2907  if (ret < 0)
2908  goto fail;
2909  break;
2910  case HEVC_NAL_SEI_PREFIX:
2911  case HEVC_NAL_SEI_SUFFIX:
2912  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2913  ret = s->avctx->hwaccel->decode_params(s->avctx,
2914  nal->type,
2915  nal->raw_data,
2916  nal->raw_size);
2917  if (ret < 0)
2918  goto fail;
2919  }
2920  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
2921  if (ret < 0)
2922  goto fail;
2923  break;
2924  case HEVC_NAL_TRAIL_R:
2925  case HEVC_NAL_TRAIL_N:
2926  case HEVC_NAL_TSA_N:
2927  case HEVC_NAL_TSA_R:
2928  case HEVC_NAL_STSA_N:
2929  case HEVC_NAL_STSA_R:
2930  case HEVC_NAL_BLA_W_LP:
2931  case HEVC_NAL_BLA_W_RADL:
2932  case HEVC_NAL_BLA_N_LP:
2933  case HEVC_NAL_IDR_W_RADL:
2934  case HEVC_NAL_IDR_N_LP:
2935  case HEVC_NAL_CRA_NUT:
2936  case HEVC_NAL_RADL_N:
2937  case HEVC_NAL_RADL_R:
2938  case HEVC_NAL_RASL_N:
2939  case HEVC_NAL_RASL_R:
2940  ret = hls_slice_header(s);
2941  if (ret < 0)
2942  return ret;
2943  if (ret == 1) {
2944  ret = AVERROR_INVALIDDATA;
2945  goto fail;
2946  }
2947 
2948 
2949  if (
2950  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
2951  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
2952  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
2953  break;
2954  }
2955 
2956  if (s->sh.first_slice_in_pic_flag) {
2957  if (s->max_ra == INT_MAX) {
2958  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
2959  s->max_ra = s->poc;
2960  } else {
2961  if (IS_IDR(s))
2962  s->max_ra = INT_MIN;
2963  }
2964  }
2965 
2966  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
2967  s->poc <= s->max_ra) {
2968  s->is_decoded = 0;
2969  break;
2970  } else {
2971  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
2972  s->max_ra = INT_MIN;
2973  }
2974 
2975  s->overlap ++;
2976  ret = hevc_frame_start(s);
2977  if (ret < 0)
2978  return ret;
2979  } else if (!s->ref) {
2980  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
2981  goto fail;
2982  }
2983 
2984  if (s->nal_unit_type != s->first_nal_type) {
2985  av_log(s->avctx, AV_LOG_ERROR,
2986  "Non-matching NAL types of the VCL NALUs: %d %d\n",
2987  s->first_nal_type, s->nal_unit_type);
2988  return AVERROR_INVALIDDATA;
2989  }
2990 
2991  if (!s->sh.dependent_slice_segment_flag &&
2992  s->sh.slice_type != HEVC_SLICE_I) {
2993  ret = ff_hevc_slice_rpl(s);
2994  if (ret < 0) {
2995  av_log(s->avctx, AV_LOG_WARNING,
2996  "Error constructing the reference lists for the current slice.\n");
2997  goto fail;
2998  }
2999  }
3000 
3001  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3002  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3003  if (ret < 0)
3004  goto fail;
3005  }
3006 
3007  if (s->avctx->hwaccel) {
3008  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3009  if (ret < 0)
3010  goto fail;
3011  } else {
3012  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3013  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3014  else
3015  ctb_addr_ts = hls_slice_data(s);
3016  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3017  s->is_decoded = 1;
3018  }
3019 
3020  if (ctb_addr_ts < 0) {
3021  ret = ctb_addr_ts;
3022  goto fail;
3023  }
3024  }
3025  break;
3026  case HEVC_NAL_EOS_NUT:
3027  case HEVC_NAL_EOB_NUT:
3028  s->seq_decode = (s->seq_decode + 1) & 0xff;
3029  s->max_ra = INT_MAX;
3030  break;
3031  case HEVC_NAL_AUD:
3032  case HEVC_NAL_FD_NUT:
3033  break;
3034  default:
3035  av_log(s->avctx, AV_LOG_INFO,
3036  "Skipping NAL unit %d\n", s->nal_unit_type);
3037  }
3038 
3039  return 0;
3040 fail:
3041  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3042  return ret;
3043  return 0;
3044 }
3045 
3046 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3047 {
3048  int i, ret = 0;
3049  int eos_at_start = 1;
3050 
3051  s->ref = NULL;
3052  s->last_eos = s->eos;
3053  s->eos = 0;
3054  s->overlap = 0;
3055 
3056  /* split the input packet into NAL units, so we know the upper bound on the
3057  * number of slices in the frame */
3058  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3059  s->nal_length_size, s->avctx->codec_id, 1, 0);
3060  if (ret < 0) {
3061  av_log(s->avctx, AV_LOG_ERROR,
3062  "Error splitting the input into NAL units.\n");
3063  return ret;
3064  }
3065 
3066  for (i = 0; i < s->pkt.nb_nals; i++) {
3067  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3068  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3069  if (eos_at_start) {
3070  s->last_eos = 1;
3071  } else {
3072  s->eos = 1;
3073  }
3074  } else {
3075  eos_at_start = 0;
3076  }
3077  }
3078 
3079  /* decode the NAL units */
3080  for (i = 0; i < s->pkt.nb_nals; i++) {
3081  H2645NAL *nal = &s->pkt.nals[i];
3082 
3083  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3084  (s->avctx->skip_frame >= AVDISCARD_NONREF
3085  && ff_hevc_nal_is_nonref(nal->type)))
3086  continue;
3087 
3088  ret = decode_nal_unit(s, nal);
3089  if (ret >= 0 && s->overlap > 2)
3090  ret = AVERROR_INVALIDDATA;
3091  if (ret < 0) {
3092  av_log(s->avctx, AV_LOG_WARNING,
3093  "Error parsing NAL unit #%d.\n", i);
3094  goto fail;
3095  }
3096  }
3097 
3098 fail:
3099  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3100  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3101 
3102  return ret;
3103 }
3104 
3105 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3106 {
3107  int i;
3108  for (i = 0; i < 16; i++)
3109  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3110 }
3111 
3112 static int verify_md5(HEVCContext *s, AVFrame *frame)
3113 {
3114  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3115  int pixel_shift;
3116  int i, j;
3117 
3118  if (!desc)
3119  return AVERROR(EINVAL);
3120 
3121  pixel_shift = desc->comp[0].depth > 8;
3122 
3123  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3124  s->poc);
3125 
3126  /* the checksums are LE, so we have to byteswap for >8bpp formats
3127  * on BE arches */
3128 #if HAVE_BIGENDIAN
3129  if (pixel_shift && !s->checksum_buf) {
3130  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3131  FFMAX3(frame->linesize[0], frame->linesize[1],
3132  frame->linesize[2]));
3133  if (!s->checksum_buf)
3134  return AVERROR(ENOMEM);
3135  }
3136 #endif
3137 
3138  for (i = 0; frame->data[i]; i++) {
3139  int width = s->avctx->coded_width;
3140  int height = s->avctx->coded_height;
3141  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3142  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3143  uint8_t md5[16];
3144 
3145  av_md5_init(s->md5_ctx);
3146  for (j = 0; j < h; j++) {
3147  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3148 #if HAVE_BIGENDIAN
3149  if (pixel_shift) {
3150  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3151  (const uint16_t *) src, w);
3152  src = s->checksum_buf;
3153  }
3154 #endif
3155  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3156  }
3157  av_md5_final(s->md5_ctx, md5);
3158 
3159  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3160  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3161  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3162  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3163  } else {
3164  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3165  print_md5(s->avctx, AV_LOG_ERROR, md5);
3166  av_log (s->avctx, AV_LOG_ERROR, " != ");
3167  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3168  av_log (s->avctx, AV_LOG_ERROR, "\n");
3169  return AVERROR_INVALIDDATA;
3170  }
3171  }
3172 
3173  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3174 
3175  return 0;
3176 }
3177 
3178 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3179 {
3180  int ret, i;
3181 
3182  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3183  &s->nal_length_size, s->avctx->err_recognition,
3184  s->apply_defdispwin, s->avctx);
3185  if (ret < 0)
3186  return ret;
3187 
3188  /* export stream parameters from the first SPS */
3189  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3190  if (first && s->ps.sps_list[i]) {
3191  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3193  break;
3194  }
3195  }
3196 
3197  return 0;
3198 }
3199 
3200 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3201  AVPacket *avpkt)
3202 {
3203  int ret;
3204  int new_extradata_size;
3205  uint8_t *new_extradata;
3206  HEVCContext *s = avctx->priv_data;
3207 
3208  if (!avpkt->size) {
3209  ret = ff_hevc_output_frame(s, data, 1);
3210  if (ret < 0)
3211  return ret;
3212 
3213  *got_output = ret;
3214  return 0;
3215  }
3216 
3217  new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3218  &new_extradata_size);
3219  if (new_extradata && new_extradata_size > 0) {
3220  ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
3221  if (ret < 0)
3222  return ret;
3223  }
3224 
3225  s->ref = NULL;
3226  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3227  if (ret < 0)
3228  return ret;
3229 
3230  if (avctx->hwaccel) {
3231  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3232  av_log(avctx, AV_LOG_ERROR,
3233  "hardware accelerator failed to decode picture\n");
3234  ff_hevc_unref_frame(s, s->ref, ~0);
3235  return ret;
3236  }
3237  } else {
3238  /* verify the SEI checksum */
3239  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3240  s->sei.picture_hash.is_md5) {
3241  ret = verify_md5(s, s->ref->frame);
3242  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3243  ff_hevc_unref_frame(s, s->ref, ~0);
3244  return ret;
3245  }
3246  }
3247  }
3248  s->sei.picture_hash.is_md5 = 0;
3249 
3250  if (s->is_decoded) {
3251  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3252  s->is_decoded = 0;
3253  }
3254 
3255  if (s->output_frame->buf[0]) {
3256  av_frame_move_ref(data, s->output_frame);
3257  *got_output = 1;
3258  }
3259 
3260  return avpkt->size;
3261 }
3262 
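hevc_decode_frame() consumes one AVPacket and may or may not emit a picture; delayed output is drained by calling it with empty packets. A hedged sketch of a caller loop using the generic send/receive API, which wraps this legacy decode callback; pkt, frame, fmt_ctx and stream_index are assumed to be allocated elsewhere and error handling is trimmed:

    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        if (pkt->stream_index == stream_index) {
            avcodec_send_packet(avctx, pkt);
            while (avcodec_receive_frame(avctx, frame) == 0) {
                /* frame->data[] now holds one decoded picture in output order */
                av_frame_unref(frame);
            }
        }
        av_packet_unref(pkt);
    }
    avcodec_send_packet(avctx, NULL);            /* drain: maps to the !avpkt->size path above */
    while (avcodec_receive_frame(avctx, frame) == 0)
        av_frame_unref(frame);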
3263 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3264 {
3265  int ret;
3266 
3267  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3268  if (ret < 0)
3269  return ret;
3270 
3271  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3272  if (!dst->tab_mvf_buf)
3273  goto fail;
3274  dst->tab_mvf = src->tab_mvf;
3275 
3276  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3277  if (!dst->rpl_tab_buf)
3278  goto fail;
3279  dst->rpl_tab = src->rpl_tab;
3280 
3281  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3282  if (!dst->rpl_buf)
3283  goto fail;
3284 
3285  dst->poc = src->poc;
3286  dst->ctb_count = src->ctb_count;
3287  dst->flags = src->flags;
3288  dst->sequence = src->sequence;
3289 
3290  if (src->hwaccel_picture_private) {
3291  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3292  if (!dst->hwaccel_priv_buf)
3293  goto fail;
3294  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3295  }
3296 
3297  return 0;
3298 fail:
3299  ff_hevc_unref_frame(s, dst, ~0);
3300  return AVERROR(ENOMEM);
3301 }
3302 
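hevc_ref_frame() shares the per-frame tables between thread contexts by taking new references on the same underlying buffers instead of copying them. The reference-counting semantics it relies on, in a small self-contained sketch using libavutil's AVBuffer API:

    AVBufferRef *a = av_buffer_alloc(1024);      /* one underlying buffer, refcount 1   */
    AVBufferRef *b = av_buffer_ref(a);           /* b->data == a->data, refcount now 2  */
    av_buffer_unref(&a);                         /* buffer stays alive through b        */
    av_buffer_unref(&b);                         /* last reference dropped, memory freed */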
3303 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3304 {
3305  HEVCContext *s = avctx->priv_data;
3306  int i;
3307 
3308  pic_arrays_free(s);
3309 
3310  av_freep(&s->md5_ctx);
3311 
3312  av_freep(&s->cabac_state);
3313 
3314  for (i = 0; i < 3; i++) {
3315  av_freep(&s->sao_pixel_buffer_h[i]);
3316  av_freep(&s->sao_pixel_buffer_v[i]);
3317  }
3318  av_frame_free(&s->output_frame);
3319 
3320  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3321  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3322  av_frame_free(&s->DPB[i].frame);
3323  }
3324 
3325  ff_hevc_ps_uninit(&s->ps);
3326 
3327  av_freep(&s->sh.entry_point_offset);
3328  av_freep(&s->sh.offset);
3329  av_freep(&s->sh.size);
3330 
3331  for (i = 1; i < s->threads_number; i++) {
3332  HEVCLocalContext *lc = s->HEVClcList[i];
3333  if (lc) {
3334  av_freep(&s->HEVClcList[i]);
3335  av_freep(&s->sList[i]);
3336  }
3337  }
3338  if (s->HEVClc == s->HEVClcList[0])
3339  s->HEVClc = NULL;
3340  av_freep(&s->HEVClcList[0]);
3341 
3342  ff_h2645_packet_uninit(&s->pkt);
3343 
3344  ff_hevc_reset_sei(&s->sei);
3345 
3346  return 0;
3347 }
3348 
3349 static av_cold int hevc_init_context(AVCodecContext *avctx)
3350 {
3351  HEVCContext *s = avctx->priv_data;
3352  int i;
3353 
3354  s->avctx = avctx;
3355 
3356  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3357  if (!s->HEVClc)
3358  goto fail;
3359  s->HEVClcList[0] = s->HEVClc;
3360  s->sList[0] = s;
3361 
3362  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3363  if (!s->cabac_state)
3364  goto fail;
3365 
3366  s->output_frame = av_frame_alloc();
3367  if (!s->output_frame)
3368  goto fail;
3369 
3370  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3371  s->DPB[i].frame = av_frame_alloc();
3372  if (!s->DPB[i].frame)
3373  goto fail;
3374  s->DPB[i].tf.f = s->DPB[i].frame;
3375  }
3376 
3377  s->max_ra = INT_MAX;
3378 
3379  s->md5_ctx = av_md5_alloc();
3380  if (!s->md5_ctx)
3381  goto fail;
3382 
3383  ff_bswapdsp_init(&s->bdsp);
3384 
3385  s->context_initialized = 1;
3386  s->eos = 0;
3387 
3388  ff_hevc_reset_sei(&s->sei);
3389 
3390  return 0;
3391 
3392 fail:
3393  hevc_decode_free(avctx);
3394  return AVERROR(ENOMEM);
3395 }
3396 
3397 #if HAVE_THREADS
3398 static int hevc_update_thread_context(AVCodecContext *dst,
3399  const AVCodecContext *src)
3400 {
3401  HEVCContext *s = dst->priv_data;
3402  HEVCContext *s0 = src->priv_data;
3403  int i, ret;
3404 
3405  if (!s->context_initialized) {
3406  ret = hevc_init_context(dst);
3407  if (ret < 0)
3408  return ret;
3409  }
3410 
3411  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3412  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3413  if (s0->DPB[i].frame->buf[0]) {
3414  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3415  if (ret < 0)
3416  return ret;
3417  }
3418  }
3419 
3420  if (s->ps.sps != s0->ps.sps)
3421  s->ps.sps = NULL;
3422  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3423  av_buffer_unref(&s->ps.vps_list[i]);
3424  if (s0->ps.vps_list[i]) {
3425  s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
3426  if (!s->ps.vps_list[i])
3427  return AVERROR(ENOMEM);
3428  }
3429  }
3430 
3431  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3432  av_buffer_unref(&s->ps.sps_list[i]);
3433  if (s0->ps.sps_list[i]) {
3434  s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
3435  if (!s->ps.sps_list[i])
3436  return AVERROR(ENOMEM);
3437  }
3438  }
3439 
3440  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3441  av_buffer_unref(&s->ps.pps_list[i]);
3442  if (s0->ps.pps_list[i]) {
3443  s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
3444  if (!s->ps.pps_list[i])
3445  return AVERROR(ENOMEM);
3446  }
3447  }
3448 
3449  if (s->ps.sps != s0->ps.sps)
3450  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3451  return ret;
3452 
3453  s->seq_decode = s0->seq_decode;
3454  s->seq_output = s0->seq_output;
3455  s->pocTid0 = s0->pocTid0;
3456  s->max_ra = s0->max_ra;
3457  s->eos = s0->eos;
3458  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3459 
3460  s->is_nalff = s0->is_nalff;
3461  s->nal_length_size = s0->nal_length_size;
3462 
3463  s->threads_number = s0->threads_number;
3464  s->threads_type = s0->threads_type;
3465 
3466  if (s0->eos) {
3467  s->seq_decode = (s->seq_decode + 1) & 0xff;
3468  s->max_ra = INT_MAX;
3469  }
3470 
3471  s->sei.frame_packing = s0->sei.frame_packing;
3472  s->sei.display_orientation = s0->sei.display_orientation;
3473  s->sei.mastering_display = s0->sei.mastering_display;
3474  s->sei.content_light = s0->sei.content_light;
3475  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3476 
3477  return 0;
3478 }
3479 #endif
3480 
3481 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3482 {
3483  HEVCContext *s = avctx->priv_data;
3484  int ret;
3485 
3486  avctx->internal->allocate_progress = 1;
3487 
3488  ret = hevc_init_context(avctx);
3489  if (ret < 0)
3490  return ret;
3491 
3492  s->enable_parallel_tiles = 0;
3493  s->sei.picture_timing.picture_struct = 0;
3494  s->eos = 1;
3495 
3496  atomic_init(&s->wpp_err, 0);
3497 
3498  if(avctx->active_thread_type & FF_THREAD_SLICE)
3499  s->threads_number = avctx->thread_count;
3500  else
3501  s->threads_number = 1;
3502 
3503  if (avctx->extradata_size > 0 && avctx->extradata) {
3504  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3505  if (ret < 0) {
3506  hevc_decode_free(avctx);
3507  return ret;
3508  }
3509  }
3510 
3511  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3512  s->threads_type = FF_THREAD_FRAME;
3513  else
3514  s->threads_type = FF_THREAD_SLICE;
3515 
3516  return 0;
3517 }
3518 
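Which of the two threading modes hevc_decode_init() ends up selecting is driven by what the caller sets before opening the codec; roughly, assuming codec and avctx are already allocated:

    avctx->thread_count = 0;                                  /* 0 lets libavcodec pick the CPU count */
    avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;  /* allow either mode                    */
    avcodec_open2(avctx, codec, NULL);
    /* After opening, avctx->active_thread_type holds the mode actually enabled;
     * hevc_decode_init() above picks FF_THREAD_FRAME when it is active and
     * thread_count > 1, and otherwise falls back to slice threading. */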
3519 #if HAVE_THREADS
3520 static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
3521 {
3522  HEVCContext *s = avctx->priv_data;
3523  int ret;
3524 
3525  memset(s, 0, sizeof(*s));
3526 
3527  ret = hevc_init_context(avctx);
3528  if (ret < 0)
3529  return ret;
3530 
3531  return 0;
3532 }
3533 #endif
3534 
3535 static void hevc_decode_flush(AVCodecContext *avctx)
3536 {
3537  HEVCContext *s = avctx->priv_data;
3538  ff_hevc_flush_dpb(s);
3539  ff_hevc_reset_sei(&s->sei);
3540  s->max_ra = INT_MAX;
3541  s->eos = 1;
3542 }
3543 
3544 #define OFFSET(x) offsetof(HEVCContext, x)
3545 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3546 
3547 static const AVOption options[] = {
3548  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3549  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3550  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3551  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3552  { NULL },
3553 };
3554 
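These AVOptions are reachable through the normal option API; for example, turning on the default display window cropping at open time (a sketch, assuming avctx and codec exist):

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    avcodec_open2(avctx, codec, &opts);          /* entries not consumed remain in opts */
    av_dict_free(&opts);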
3555 static const AVClass hevc_decoder_class = {
3556  .class_name = "HEVC decoder",
3557  .item_name = av_default_item_name,
3558  .option = options,
3559  .version = LIBAVUTIL_VERSION_INT,
3560 };
3561 
3562 AVCodec ff_hevc_decoder = {
3563  .name = "hevc",
3564  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3565  .type = AVMEDIA_TYPE_VIDEO,
3566  .id = AV_CODEC_ID_HEVC,
3567  .priv_data_size = sizeof(HEVCContext),
3568  .priv_class = &hevc_decoder_class,
3569  .init = hevc_decode_init,
3570  .close = hevc_decode_free,
3571  .decode = hevc_decode_frame,
3572  .flush = hevc_decode_flush,
3573  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3574  .init_thread_copy = ONLY_IF_THREADS_ENABLED(hevc_init_thread_copy),
3575  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3576  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3577  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
3578  .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3579  .hw_configs = (const AVCodecHWConfigInternal*[]) {
3580 #if CONFIG_HEVC_DXVA2_HWACCEL
3581  HWACCEL_DXVA2(hevc),
3582 #endif
3583 #if CONFIG_HEVC_D3D11VA_HWACCEL
3584  HWACCEL_D3D11VA(hevc),
3585 #endif
3586 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3587  HWACCEL_D3D11VA2(hevc),
3588 #endif
3589 #if CONFIG_HEVC_NVDEC_HWACCEL
3590  HWACCEL_NVDEC(hevc),
3591 #endif
3592 #if CONFIG_HEVC_VAAPI_HWACCEL
3593  HWACCEL_VAAPI(hevc),
3594 #endif
3595 #if CONFIG_HEVC_VDPAU_HWACCEL
3596  HWACCEL_VDPAU(hevc),
3597 #endif
3598 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3599  HWACCEL_VIDEOTOOLBOX(hevc),
3600 #endif
3601  NULL
3602  },
3603 };
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
verify_md5
static int verify_md5(HEVCContext *s, AVFrame *frame)
Definition: hevcdec.c:3112
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
HEVC_NAL_RADL_N
@ HEVC_NAL_RADL_N
Definition: hevc.h:35
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2729
SliceHeader::beta_offset
int beta_offset
beta_offset_div2 * 2
Definition: hevc_ps.h:96
AVCodec
AVCodec.
Definition: avcodec.h:3481
bswapdsp.h
L1
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
Definition: snow.txt:554
stride
int stride
Definition: mace.c:144
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
HEVCLocalContext
Definition: hevcdec.h:340
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
HEVCFrame::flags
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
Definition: hevcdec.h:337
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
HWACCEL_MAX
#define HWACCEL_MAX
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ff_hevc_sao_type_idx_decode
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:566
HEVCFrame::tf
ThreadFrame tf
Definition: hevcdec.h:313
HEVCFrame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: hevcdec.h:325
level
uint8_t level
Definition: svq3.c:207
ff_hevc_no_residual_syntax_flag_decode
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:828
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
hls_decode_neighbour
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts)
Definition: hevcdec.c:2362
ff_hevc_sao_eo_class_decode
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
Definition: hevc_cabac.c:601
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
ff_hevc_pred_init
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
Definition: hevcpred.c:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
hevc_decode_flush
static void hevc_decode_flush(AVCodecContext *avctx)
Definition: hevcdec.c:3535
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2193
ff_hevc_set_qPy
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
Definition: hevc_filter.c:121
chroma_mc_bi
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
Definition: hevcdec.c:1655
PART_NxN
@ PART_NxN
Definition: hevcdec.h:147
luma_mc_bi
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
Definition: hevcdec.c:1499
ff_hevc_res_scale_sign_flag
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
Definition: hevc_cabac.c:905
decode_nal_unit
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2859
ff_hevc_split_transform_flag_decode
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
Definition: hevc_cabac.c:866
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SAO_BAND
@ SAO_BAND
Definition: hevcdec.h:213
ff_hevc_profiles
const AVProfile ff_hevc_profiles[]
Definition: profiles.c:77
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
ff_hevc_hls_filter
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
Definition: hevc_filter.c:842
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2694
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
HEVCLocalContext::ctb_up_flag
uint8_t ctb_up_flag
Definition: hevcdec.h:358
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
SliceHeader::num_entry_point_offsets
int num_entry_point_offsets
Definition: hevc_ps.h:104
HEVC_NAL_STSA_N
@ HEVC_NAL_STSA_N
Definition: hevc.h:33
PART_2NxnU
@ PART_2NxnU
Definition: hevcdec.h:148
ff_hevc_cu_qp_delta_abs
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
Definition: hevc_cabac.c:633
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwaccel.h:79
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
set_deblocking_bypass
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1243
pixdesc.h
HEVCFrame::tab_mvf
MvField * tab_mvf
Definition: hevcdec.h:314
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2186
TransformUnit::cu_qp_delta
int cu_qp_delta
Definition: hevcdec.h:286
HEVC_NAL_TSA_N
@ HEVC_NAL_TSA_N
Definition: hevc.h:31
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
HEVCFrame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: hevcdec.h:326
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
PAR
#define PAR
Definition: hevcdec.c:3545
INTRA_DC
@ INTRA_DC
Definition: hevcdec.h:175
AVOption
AVOption.
Definition: opt.h:246
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:517
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:470
hevc_decode_free
static av_cold int hevc_decode_free(AVCodecContext *avctx)
Definition: hevcdec.c:3303
ff_hevc_hls_filters
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
Definition: hevc_filter.c:878
data
const char data[16]
Definition: mxf.c:91
Mv::y
int16_t y
vertical component of motion vector
Definition: hevcdec.h:257
ff_hevc_mpm_idx_decode
int ff_hevc_mpm_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:752
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
SAO_EDGE
@ SAO_EDGE
Definition: hevcdec.h:214
ff_hevc_hls_residual_coding
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
Definition: hevc_cabac.c:1024
SliceHeader::slice_temporal_mvp_enabled_flag
uint8_t slice_temporal_mvp_enabled_flag
Definition: hevc_ps.h:76
MvField::mv
Mv mv[2]
Definition: hevcdec.h:261
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
TransformUnit::is_cu_qp_delta_coded
uint8_t is_cu_qp_delta_coded
Definition: hevcdec.h:294
HEVC_NAL_RASL_N
@ HEVC_NAL_RASL_N
Definition: hevc.h:37
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
HEVC_NAL_STSA_R
@ HEVC_NAL_STSA_R
Definition: hevc.h:34
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
MODE_INTRA
@ MODE_INTRA
Definition: hevcdec.h:156
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
HEVC_NAL_BLA_W_RADL
@ HEVC_NAL_BLA_W_RADL
Definition: hevc.h:46
SliceHeader::slice_loop_filter_across_slices_enabled_flag
uint8_t slice_loop_filter_across_slices_enabled_flag
Definition: hevc_ps.h:85
SAOParams::offset_sign
int offset_sign[3][4]
sao_offset_sign
Definition: hevcdsp.h:34
export_stream_params
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:317
HEVCLocalContext::ctb_up_left_flag
uint8_t ctb_up_left_flag
Definition: hevcdec.h:360
H2645NAL::temporal_id
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
Definition: h2645_parse.h:57
RefPicList
Definition: hevcdec.h:231
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
OFFSET
#define OFFSET(x)
Definition: hevcdec.c:3544
PF_INTRA
@ PF_INTRA
Definition: hevcdec.h:167
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
MODE_SKIP
@ MODE_SKIP
Definition: hevcdec.h:157
HEVCLocalContext::end_of_tiles_x
int end_of_tiles_x
Definition: hevcdec.h:361
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
CodingUnit::x
int x
Definition: hevcdec.h:243
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
BOUNDARY_LEFT_TILE
#define BOUNDARY_LEFT_TILE
Definition: hevcdec.h:375
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:3105
golomb.h
exp golomb vlc stuff
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
PART_2Nx2N
@ PART_2Nx2N
Definition: hevcdec.h:144
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
fmt
const char * fmt
Definition: avisynth_c.h:861
SET_SAO
#define SET_SAO(elem, value)
Definition: hevcdec.c:935
HEVCLocalContext::ctb_up_right_flag
uint8_t ctb_up_right_flag
Definition: hevcdec.h:359
ff_hevc_clear_refs
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
Definition: hevc_refs.c:66
PRED_BI
@ PRED_BI
Definition: hevcdec.h:163
U
#define U(x)
Definition: vp56_arith.h:37
ff_hevc_split_coding_unit_flag_decode
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
Definition: hevc_cabac.c:686
fail
#define fail()
Definition: checkasm.h:120
PredictionUnit::intra_pred_mode_c
uint8_t intra_pred_mode_c[4]
Definition: hevcdec.h:281
ff_hevc_sao_merge_flag_decode
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:561
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:2824
md5
struct AVMD5 * md5
Definition: movenc.c:56
InterPredIdc
InterPredIdc
Definition: hevcdec.h:160
MODE_INTER
@ MODE_INTER
Definition: hevcdec.h:155
GetBitContext
Definition: get_bits.h:61
HEVCLocalContext::pu
PredictionUnit pu
Definition: hevcdec.h:371
decode_lt_rps
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
Definition: hevcdec.c:259
TransformUnit::res_scale_val
int res_scale_val
Definition: hevcdec.h:288
SliceHeader::short_term_ref_pic_set_size
int short_term_ref_pic_set_size
Definition: hevc_ps.h:67
hevc_decoder_class
static const AVClass hevc_decoder_class
Definition: hevcdec.c:3555
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
ff_hevc_output_frame
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
Definition: hevc_refs.c:174
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:1753
SliceHeader::long_term_ref_pic_set_size
int long_term_ref_pic_set_size
Definition: hevc_ps.h:70
ff_hevc_luma_mv_mvp_mode
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
Definition: hevc_mvs.c:582
CTB
#define CTB(tab, x, y)
Definition: hevcdec.c:933
ff_reset_entries
void ff_reset_entries(AVCodecContext *avctx)
Definition: pthread_slice.c:238
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_hevc_skip_flag_decode
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
Definition: hevc_cabac.c:618
ff_hevc_merge_flag_decode
int ff_hevc_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:792
AVRational::num
int num
Numerator.
Definition: rational.h:59
src
#define src
Definition: vp8dsp.c:254
HWACCEL_VIDEOTOOLBOX
@ HWACCEL_VIDEOTOOLBOX
Definition: ffmpeg.h:62
SliceHeader::slice_segment_addr
unsigned int slice_segment_addr
address (in raster order) of the first block in the current slice
Definition: hevc_ps.h:52
hevc_parse.h
MvField::ref_idx
int8_t ref_idx[2]
Definition: hevcdec.h:262
ff_hevc_save_states
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
Definition: hevc_cabac.c:450
ff_hevc_deblocking_boundary_strengths
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
Definition: hevc_filter.c:714
SAOParams::eo_class
int eo_class[3]
sao_eo_class
Definition: hevcdsp.h:38
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
ff_hevc_prev_intra_luma_pred_flag_decode
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:747
ff_hevc_decode_nal_sei
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int type)
Definition: hevc_sei.c:353
ff_thread_report_progress2
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
Definition: pthread_slice.c:174
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
hls_decode_entry_wpp
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
Definition: hevcdec.c:2483
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2179
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
QPEL_EXTRA_AFTER
#define QPEL_EXTRA_AFTER
Definition: hevcdec.h:66
HEVC_NAL_BLA_N_LP
@ HEVC_NAL_BLA_N_LP
Definition: hevc.h:47
SAOParams::type_idx
uint8_t type_idx[3]
sao_type_idx
Definition: hevcdsp.h:42
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
TransformUnit::intra_pred_mode
int intra_pred_mode
Definition: hevcdec.h:291
ff_hevc_hls_mvd_coding
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevc_cabac.c:1534
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
HEVC_NAL_RADL_R
@ HEVC_NAL_RADL_R
Definition: hevc.h:36
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:557
SliceHeader::cabac_init_flag
uint8_t cabac_init_flag
Definition: hevc_ps.h:83
H2645NAL::size
int size
Definition: h2645_parse.h:35
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
hls_pcm_sample
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1381
hwaccel.h
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1855
width
#define width
QPEL_EXTRA_BEFORE
#define QPEL_EXTRA_BEFORE
Definition: hevcdec.h:65
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
stereo3d.h
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
s
#define s(width, name)
Definition: cbs_vp9.c:257
ff_thread_await_progress2
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
Definition: pthread_slice.c:185
SAO_NOT_APPLIED
@ SAO_NOT_APPLIED
Definition: hevcdec.h:212
hls_sao_param
static void hls_sao_param(HEVCContext *s, int rx, int ry)
Definition: hevcdec.c:947
set_sps
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
Definition: hevcdec.c:445
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
ff_hevc_ref_idx_lx_decode
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
Definition: hevc_cabac.c:807
HEVC_MAX_REFS
@ HEVC_MAX_REFS
Definition: hevc.h:119
s1
#define s1
Definition: regdef.h:38
ff_hevc_nal_is_nonref
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
Definition: hevcdec.h:552
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
luma_intra_pred_mode
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
Definition: hevcdec.c:1926
ff_hevc_set_new_ref
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
Definition: hevc_refs.c:135
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
SliceHeader::slice_rps
ShortTermRPS slice_rps
Definition: hevc_ps.h:68
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ff_hevc_cu_transquant_bypass_flag_decode
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:613
IS_IDR
#define IS_IDR(s)
Definition: hevcdec.h:77
ff_hevc_intra_chroma_pred_mode_decode
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:770
set_ct_depth
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
Definition: hevcdec.c:2006
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:36
ff_hevc_slice_rpl
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
Definition: hevc_refs.c:291
RefPicList::ref
struct HEVCFrame * ref[HEVC_MAX_REFS]
Definition: hevcdec.h:232
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
ff_hevc_sao_offset_abs_decode
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
Definition: hevc_cabac.c:586
H2645NAL::skipped_bytes_pos
int * skipped_bytes_pos
Definition: h2645_parse.h:61
HEVC_SLICE_I
@ HEVC_SLICE_I
Definition: hevc.h:98
hls_coding_unit
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2114
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SliceHeader::size
int * size
Definition: hevc_ps.h:103
SliceHeader::collocated_list
uint8_t collocated_list
Definition: hevc_ps.h:86
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
ff_hevc_luma_mv_merge_mode
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevc_mvs.c:479
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:446
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: avcodec.h:808
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
INTRA_ANGULAR_26
@ INTRA_ANGULAR_26
Definition: hevcdec.h:200
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
CodingUnit::max_trafo_depth
uint8_t max_trafo_depth
MaxTrafoDepth.
Definition: hevcdec.h:251
SliceHeader::slice_ctb_addr_rs
int slice_ctb_addr_rs
Definition: hevc_ps.h:122
int32_t
int32_t
Definition: audio_convert.c:194
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
arg
const char * arg
Definition: jacosubdec.c:66
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
HEVC_NAL_IDR_N_LP
@ HEVC_NAL_IDR_N_LP
Definition: hevc.h:49
SliceHeader::pic_output_flag
uint8_t pic_output_flag
Definition: hevc_ps.h:62
hls_slice_data_wpp
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2564
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
ff_hevc_sao_offset_sign_decode
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
Definition: hevc_cabac.c:596
PredictionUnit::rem_intra_luma_pred_mode
int rem_intra_luma_pred_mode
Definition: hevcdec.h:277
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
IS_BLA
#define IS_BLA(s)
Definition: hevcdec.h:78
ff_hevc_merge_idx_decode
int ff_hevc_merge_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:781
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
HEVC_SLICE_B
@ HEVC_SLICE_B
Definition: hevc.h:96
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
hevc_ref_frame
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
Definition: hevcdec.c:3263
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwaccel.h:67
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2200
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
HEVCLocalContext::tmp
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
Definition: hevcdec.h:367
ff_hevc_ps_uninit
void ff_hevc_ps_uninit(HEVCParamSets *ps)
Definition: hevc_ps.c:1721
HEVC_NAL_PPS
@ HEVC_NAL_PPS
Definition: hevc.h:63
LongTermRPS::poc
int poc[32]
Definition: hevc_ps.h:43
CodingUnit::cu_transquant_bypass_flag
uint8_t cu_transquant_bypass_flag
Definition: hevcdec.h:252
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
HEVCLocalContext::first_qp_group
uint8_t first_qp_group
Definition: hevcdec.h:345
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
hls_transform_unit
static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
Definition: hevcdec.c:1037
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3750
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
L0
#define L0
Definition: hevcdec.h:59
HEVCFrame::rpl_tab
RefPicListTab ** rpl_tab
Definition: hevcdec.h:316
ff_hevc_pel_weight
const uint8_t ff_hevc_pel_weight[65]
Definition: hevcdec.c:47
HEVC_NAL_SEI_SUFFIX
@ HEVC_NAL_SEI_SUFFIX
Definition: hevc.h:69
HEVC_NAL_CRA_NUT
@ HEVC_NAL_CRA_NUT
Definition: hevc.h:50
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
PART_Nx2N
@ PART_Nx2N
Definition: hevcdec.h:146
RefPicListTab
Definition: hevcdec.h:238
BOUNDARY_UPPER_TILE
#define BOUNDARY_UPPER_TILE
Definition: hevcdec.h:377
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:420
ff_hevc_decode_extradata
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
Definition: hevc_parse.c:78
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. If the codec allocates writable tables in its init()
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2705
SliceHeader::nb_refs
unsigned int nb_refs[2]
Definition: hevc_ps.h:78
Mv::x
int16_t x
horizontal component of motion vector
Definition: hevcdec.h:256
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
AVCodecContext::level
int level
level
Definition: avcodec.h:3018
HEVC_NAL_RASL_R
@ HEVC_NAL_RASL_R
Definition: hevc.h:38
PF_BI
@ PF_BI
Definition: hevcdec.h:170
SAMPLE_CTB
#define SAMPLE_CTB(tab, x, y)
Definition: hevcdec.h:75
HEVCWindow
Definition: hevc_ps.h:125
SCAN_HORIZ
@ SCAN_HORIZ
Definition: hevcdec.h:227
hevc_data.h
error
static void error(const char *err)
Definition: target_dec_fuzzer.c:61
hevc_decode_frame
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
Definition: hevcdec.c:3200
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_hevc_frame_rps
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Definition: hevc_refs.c:451
HEVCLocalContext::edge_emu_buffer
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:364
IS_IRAP
#define IS_IRAP(s)
Definition: hevcdec.h:80
LongTermRPS::used
uint8_t used[32]
Definition: hevc_ps.h:44
SliceHeader::colour_plane_id
uint8_t colour_plane_id
RPS coded in the slice header itself is stored here.
Definition: hevc_ps.h:63
ff_hevc_mvp_lx_flag_decode
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:823
PART_nLx2N
@ PART_nLx2N
Definition: hevcdec.h:150
SliceHeader::dependent_slice_segment_flag
uint8_t dependent_slice_segment_flag
Definition: hevc_ps.h:61
POS
#define POS(c_idx, x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:810
SliceHeader::first_slice_in_pic_flag
uint8_t first_slice_in_pic_flag
Definition: hevc_ps.h:60
desc
const char * desc
Definition: nvenc.c:68
HEVCLocalContext::ctb_left_flag
uint8_t ctb_left_flag
Definition: hevcdec.h:357
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
chroma_mc_uni
static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
Definition: hevcdec.c:1590
ff_hevc_pred_mode_decode
int ff_hevc_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:681
AVPacket::size
int size
Definition: avcodec.h:1478
BOUNDARY_UPPER_SLICE
#define BOUNDARY_UPPER_SLICE
Definition: hevcdec.h:376
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
hevcdec.h
ff_hevc_set_neighbour_available
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
Definition: hevc_mvs.c:42
decode_nal_units
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
Definition: hevcdec.c:3046
SAOParams::offset_abs
int offset_abs[3][4]
sao_offset_abs
Definition: hevcdsp.h:33
AVCodecInternal::allocate_progress
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
print_md5
static void print_md5(void *log_ctx, int level, uint8_t md5[16])
Definition: hevcdec.c:3105
INTRA_PLANAR
@ INTRA_PLANAR
Definition: hevcdec.h:174
HEVCFrame::rpl_buf
AVBufferRef * rpl_buf
Definition: hevcdec.h:323
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
ff_hevc_decode_nal_sps
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
Definition: hevc_ps.c:1224
PART_2NxnD
@ PART_2NxnD
Definition: hevcdec.h:149
size
int size
Definition: twinvq_data.h:11134
HEVC_NAL_BLA_W_LP
@ HEVC_NAL_BLA_W_LP
Definition: hevc.h:45
SCAN_VERT
@ SCAN_VERT
Definition: hevcdec.h:228
ff_hevc_compute_poc
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
Definition: hevc_ps.c:1737
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
intra_prediction_unit_default_value
static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2091
SliceHeader::collocated_ref_idx
unsigned int collocated_ref_idx
Definition: hevc_ps.h:88
SliceHeader::entry_point_offset
unsigned * entry_point_offset
Definition: hevc_ps.h:101
H2645NAL
Definition: h2645_parse.h:32
hevc_await_progress
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height)
Definition: hevcdec.c:1745
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
ff_hevc_decode_nal_vps
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:424
pic_arrays_free
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo correspond to the function foo in the specification (HLS stands for High ...
Definition: hevcdec.c:59
AVFrameSideData::data
uint8_t * data
Definition: frame.h:203
TransformUnit::chroma_mode_c
int chroma_mode_c
Definition: hevcdec.h:293
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2836
GetBitContext::index
int index
Definition: get_bits.h:67
SliceHeader::short_term_ref_pic_set_sps_flag
int short_term_ref_pic_set_sps_flag
Definition: hevc_ps.h:66
SliceHeader::no_output_of_prior_pics_flag
uint8_t no_output_of_prior_pics_flag
Definition: hevc_ps.h:75
SliceHeader::max_num_merge_cand
unsigned int max_num_merge_cand
5 - 5_minus_max_num_merge_cand
Definition: hevc_ps.h:99
AVCodecHWConfigInternal
Definition: hwaccel.h:29
MvField
Definition: hevcdec.h:260
QPEL_EXTRA
#define QPEL_EXTRA
Definition: hevcdec.h:67
val
const char const char void * val
Definition: avisynth_c.h:863
PF_L1
@ PF_L1
Definition: hevcdec.h:169
ff_hevc_unref_frame
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
Definition: hevc_refs.c:32
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
get_format
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:371
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:393
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1041
ff_hevc_pcm_flag_decode
int ff_hevc_pcm_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:742
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:55
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_hevc_cbf_cb_cr_decode
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:871
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
hls_slice_data
static int hls_slice_data(HEVCContext *s)
Definition: hevcdec.c:2472
TransformUnit::cu_qp_offset_cb
int8_t cu_qp_offset_cb
Definition: hevcdec.h:296
pic_arrays_init
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:87
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
HEVCFrame::rpl_tab_buf
AVBufferRef * rpl_tab_buf
Definition: hevcdec.h:322
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
MvField::pred_flag
int8_t pred_flag
Definition: hevcdec.h:263
HEVCLocalContext::ct_depth
int ct_depth
Definition: hevcdec.h:369
src0
#define src0
Definition: h264pred.c:138
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2835
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:177
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
PART_nRx2N
@ PART_nRx2N
Definition: hevcdec.h:151
EPEL_EXTRA_BEFORE
#define EPEL_EXTRA_BEFORE
Definition: hevcdec.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
SliceHeader::slice_cb_qp_offset
int slice_cb_qp_offset
Definition: hevc_ps.h:91
SliceHeader
Definition: hevc_ps.h:48
HEVCFrame::frame
AVFrame * frame
Definition: hevcdec.h:312
HEVC_NAL_TRAIL_R
@ HEVC_NAL_TRAIL_R
Definition: hevc.h:30
src1
#define src1
Definition: h264pred.c:139
hls_decode_entry
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
Definition: hevcdec.c:2411
ff_hevc_inter_pred_idc_decode
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
Definition: hevc_cabac.c:797
ff_hevc_cu_qp_delta_sign_flag
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
Definition: hevc_cabac.c:660
hevc_frame_start
static int hevc_frame_start(HEVCContext *s)
Definition: hevcdec.c:2800
av_md5_init
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
Definition: md5.c:143
SliceHeader::slice_sample_adaptive_offset_flag
uint8_t slice_sample_adaptive_offset_flag[3]
Definition: hevc_ps.h:80
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: avcodec.h:809
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
HEVCFrame
Definition: hevcdec.h:311
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
HEVCLocalContext::gb
GetBitContext gb
Definition: hevcdec.h:347
ff_hevc_cbf_luma_decode
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:876
internal.h
EPEL_EXTRA_AFTER
#define EPEL_EXTRA_AFTER
Definition: hevcdec.h:63
HEVCFrame::ctb_count
int ctb_count
Definition: hevcdec.h:317
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
SliceHeader::offset
int * offset
Definition: hevc_ps.h:102
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
common.h
HEVCFrame::sequence
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
Definition: hevcdec.h:332
SliceHeader::mvd_l1_zero_flag
uint8_t mvd_l1_zero_flag
Definition: hevc_ps.h:81
delta
float delta
Definition: vorbis_enc_data.h:457
md5.h
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: avcodec.h:392
ff_hevc_bump_frame
void ff_hevc_bump_frame(HEVCContext *s)
Definition: hevc_refs.c:233
av_always_inline
#define av_always_inline
Definition: attributes.h:43
HEVC_SLICE_P
@ HEVC_SLICE_P
Definition: hevc.h:97
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
PF_L0
@ PF_L0
Definition: hevcdec.h:168
EDGE_EMU_BUFFER_STRIDE
#define EDGE_EMU_BUFFER_STRIDE
Definition: hevcdec.h:69
tab_mode_idx
static const uint8_t tab_mode_idx[]
Definition: hevcdec.c:2019
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
HEVCLocalContext::qp_y
int8_t qp_y
Definition: hevcdec.h:350
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
HEVC_NAL_TSA_R
@ HEVC_NAL_TSA_R
Definition: hevc.h:32
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:499
SliceHeader::list_entry_lx
unsigned int list_entry_lx[2][32]
Definition: hevc_ps.h:72
AVCodecContext::height
int height
Definition: avcodec.h:1738
hevc_decode_extradata
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
Definition: hevcdec.c:3178
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
av_md5_final
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
Definition: md5.c:192
hevc_decode_init
static av_cold int hevc_decode_init(AVCodecContext *avctx)
Definition: hevcdec.c:3481
HEVCFrame::poc
int poc
Definition: hevcdec.h:318
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:521
hevc.h
ff_hevc_cu_chroma_qp_offset_idx
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
Definition: hevc_cabac.c:670
SAOParams
Definition: hevcdsp.h:32
SliceHeader::short_term_rps
const ShortTermRPS * short_term_rps
Definition: hevc_ps.h:69
HEVC_NAL_VPS
@ HEVC_NAL_VPS
Definition: hevc.h:61
SliceHeader::cu_chroma_qp_offset_enabled_flag
uint8_t cu_chroma_qp_offset_enabled_flag
Definition: hevc_ps.h:94
HEVC_NAL_IDR_W_RADL
@ HEVC_NAL_IDR_W_RADL
Definition: hevc.h:48
ff_hevc_cu_chroma_qp_offset_flag
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
Definition: hevc_cabac.c:665
ret
ret
Definition: filter_design.txt:187
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ff_hevc_reset_sei
void ff_hevc_reset_sei(HEVCSEI *s)
Reset SEI values that are stored on the Context.
Definition: hevc_sei.c:366
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
Notes from the libavfilter design document on frame handling: buffered frames must be flushed immediately if a new input produces new ones; a filter must not call request_frame() to get more, but should process or queue the frame it receives, leaving the task of requesting more frames to its request_frame() method or to the application; a filter with several inputs must be ready for frames arriving randomly on any input and will usually need some queuing mechanism, and it is acceptable to keep a limited queue and drop frames when the inputs are too unbalanced.
Definition: filter_design.txt:264
PRED_L1
@ PRED_L1
Definition: hevcdec.h:162
PredictionUnit::mvd
Mv mvd
Definition: hevcdec.h:279
SliceHeader::disable_deblocking_filter_flag
uint8_t disable_deblocking_filter_flag
slice_header_disable_deblocking_filter_flag
Definition: hevc_ps.h:84
ff_hevc_dsp_init
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
Definition: hevcdsp.c:126
HEVCLocalContext::edge_emu_buffer2
uint8_t edge_emu_buffer2[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:366
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: avcodec.h:2702
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
av_md5_update
void av_md5_update(AVMD5 *ctx, const uint8_t *src, int len)
Update hash value.
Definition: md5.c:154
hevc_init_context
static av_cold int hevc_init_context(AVCodecContext *avctx)
Definition: hevcdec.c:3349
hevc_luma_mv_mvp_mode
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevcdec.c:1755
ff_thread_finish_setup
Notes collected from the frame-threading documentation: the pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across ... will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before ...; such code, as well as code calling ..., should be moved up to before the decode process starts. Call ff_thread_finish_setup() afterwards. If some code can't be moved, ...
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
HEVC_NAL_EOS_NUT
@ HEVC_NAL_EOS_NUT
Definition: hevc.h:65
ff_hevc_rem_intra_luma_pred_mode_decode
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:760
ff_hevc_frame_nb_refs
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
Definition: hevc_refs.c:511
hls_prediction_unit
static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
Definition: hevcdec.c:1800
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
HEVCLocalContext::boundary_flags
int boundary_flags
Definition: hevcdec.h:380
HEVC_NAL_TRAIL_N
@ HEVC_NAL_TRAIL_N
Definition: hevc.h:29
LongTermRPS
Definition: hevc_ps.h:42
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
SliceHeader::slice_type
enum HEVCSliceType slice_type
Definition: hevc_ps.h:56
ff_hevc_flush_dpb
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
Definition: hevc_refs.c:75
HEVC_NAL_AUD
@ HEVC_NAL_AUD
Definition: hevc.h:64
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2843
SliceHeader::slice_qp
int8_t slice_qp
Definition: hevc_ps.h:106
hls_coding_quadtree
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth)
Definition: hevcdec.c:2277
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
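
A minimal sketch of attaching mastering-display side data to a frame; the luminance values and the helper name are illustrative only:

#include "libavutil/frame.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/rational.h"

/* Allocate mastering-display side data on a frame and fill the luminance
 * fields; display_primaries/white_point could be set the same way. */
static void attach_mdm_example(AVFrame *frame)
{
    AVMasteringDisplayMetadata *mdm =
        av_mastering_display_metadata_create_side_data(frame);
    if (!mdm)
        return;
    mdm->max_luminance = av_make_q(1000, 1);  /* 1000 cd/m^2 */
    mdm->min_luminance = av_make_q(5, 1000);  /* 0.005 cd/m^2 */
    mdm->has_luminance = 1;
}
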
SUBDIVIDE
#define SUBDIVIDE(x, y, idx)
PredictionUnit::merge_flag
uint8_t merge_flag
Definition: hevcdec.h:280
av_md5_alloc
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
Definition: md5.c:48
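
The av_md5_* entries in this index form one workflow (allocate, init, update, final); a minimal sketch follows. The helper name is illustrative, and the plane-by-plane hashing done for the decoded-picture-hash check is only indicated:

#include <stdint.h>
#include "libavutil/md5.h"
#include "libavutil/mem.h"

/* Hash a byte range and write the 16-byte MD5 digest to `digest`. */
static void md5_example(const uint8_t *data, int size, uint8_t digest[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return;
    av_md5_init(md5);
    av_md5_update(md5, data, size);  /* may be called repeatedly, e.g. per line */
    av_md5_final(md5, digest);
    av_free(md5);
}
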
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: avcodec.h:1199
AVRational::den
int den
Denominator.
Definition: rational.h:60
pred_weight_table
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
Definition: hevcdec.c:143
SliceHeader::slice_cr_qp_offset
int slice_cr_qp_offset
Definition: hevc_ps.h:92
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
HEVCContext
Definition: hevcdec.h:383
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:2898
CodingUnit::pred_mode
enum PredMode pred_mode
PredMode.
Definition: hevcdec.h:246
SliceHeader::pic_order_cnt_lsb
int pic_order_cnt_lsb
Definition: hevc_ps.h:58
ff_hevc_cabac_init
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts)
Definition: hevc_cabac.c:504
HEVCLocalContext::qPy_pred
int qPy_pred
Definition: hevcdec.h:353
HEVCFrame::tab_mvf_buf
AVBufferRef * tab_mvf_buf
Definition: hevcdec.h:321
SCAN_DIAG
@ SCAN_DIAG
Definition: hevcdec.h:226
SliceHeader::rpl_modification_flag
uint8_t rpl_modification_flag[2]
Definition: hevc_ps.h:74
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1006
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:3229
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
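
A minimal sketch of AVBuffer reference counting; the 4096-byte size and the helper name are illustrative:

#include "libavutil/buffer.h"

/* The underlying data stays alive until the last reference is unreferenced. */
static void buffer_ref_example(void)
{
    AVBufferRef *a = av_buffer_alloc(4096);
    AVBufferRef *b;
    if (!a)
        return;
    b = av_buffer_ref(a);  /* second reference; may be NULL on failure */
    av_buffer_unref(&a);   /* data still reachable through b */
    av_buffer_unref(&b);   /* last reference dropped: data is freed */
}
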
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
HEVCLocalContext::tu
TransformUnit tu
Definition: hevcdec.h:355
hls_cross_component_pred
static int hls_cross_component_pred(HEVCContext *s, int idx)
Definition: hevcdec.c:1021
hls_slice_header
static int hls_slice_header(HEVCContext *s)
Definition: hevcdec.c:501
CodingUnit::y
int y
Definition: hevcdec.h:244
set_side_data
static int set_side_data(HEVCContext *s)
Definition: hevcdec.c:2662
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1861
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
...
Definition: avcodec.h:1753
Mv
Definition: hevcdec.h:255
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
HEVC_NAL_SPS
@ HEVC_NAL_SPS
Definition: hevc.h:62
PRED_L0
@ PRED_L0
Definition: hevcdec.h:161
get_bitsz
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Definition: get_bits.h:415
HEVCVPS
Definition: hevc_ps.h:195
mastering_display_metadata.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:105
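
get_ue_golomb_long() reads an unsigned Exp-Golomb ("ue(v)") code: k leading zero bits, a marker 1, then k suffix bits, decoding to 2^k - 1 + suffix. The following standalone sketch illustrates that rule on a plain byte buffer; it is not the GetBitContext-based implementation referenced above and it assumes a well-formed input:

#include <stddef.h>
#include <stdint.h>

/* Read one bit, MSB-first, advancing *pos (bit index into buf). */
static unsigned read_bit(const uint8_t *buf, size_t *pos)
{
    unsigned bit = (buf[*pos >> 3] >> (7 - (*pos & 7))) & 1;
    (*pos)++;
    return bit;
}

/* ue(v): "1" -> 0, "010" -> 1, "011" -> 2, "00100" -> 3, ... */
static unsigned decode_ue(const uint8_t *buf, size_t *pos)
{
    unsigned leading_zeros = 0, suffix = 0;

    while (read_bit(buf, pos) == 0)          /* count zeros before the marker */
        leading_zeros++;
    for (unsigned i = 0; i < leading_zeros; i++)
        suffix = (suffix << 1) | read_bit(buf, pos);

    return (1u << leading_zeros) - 1 + suffix;
}
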
ff_hevc_sao_band_position_decode
int ff_hevc_sao_band_position_decode(HEVCContext *s)
Definition: hevc_cabac.c:576
EPEL_EXTRA
#define EPEL_EXTRA
Definition: hevcdec.h:64
ff_hevc_part_mode_decode
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
Definition: hevc_cabac.c:705
s0
#define s0
Definition: regdef.h:37
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
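
A minimal sketch of attaching stereo-3D side data to a decoded frame; the packing type chosen below and the helper name are illustrative only:

#include "libavutil/frame.h"
#include "libavutil/stereo3d.h"

/* Allocate AVStereo3D side data on the frame and record the packing. */
static void attach_stereo3d_example(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return;
    stereo->type = AV_STEREO3D_SIDEBYSIDE;
}
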
HEVCSPS
Definition: hevc_ps.h:225
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:201
ff_thread_get_format
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:938
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
HEVCPPS
Definition: hevc_ps.h:321
CodingUnit::part_mode
enum PartMode part_mode
PartMode.
Definition: hevcdec.h:247
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
SliceHeader::tc_offset
int tc_offset
tc_offset_div2 * 2
Definition: hevc_ps.h:97
LongTermRPS::nb_refs
uint8_t nb_refs
Definition: hevc_ps.h:45
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
TransformUnit::cross_pf
uint8_t cross_pf
Definition: hevcdec.h:298
SAOParams::offset_val
int16_t offset_val[3][5]
SaoOffsetVal.
Definition: hevcdsp.h:40
HEVCLocalContext::cu
CodingUnit cu
Definition: hevcdec.h:370
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
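
A minimal sketch of the av_fast_malloc() pattern (the struct and helper names are illustrative): the pointer and its current size travel together, reallocation happens only when the existing buffer is too small, and the previous contents are not preserved when it does:

#include <stddef.h>
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

typedef struct ScratchBuf {
    uint8_t     *data;
    unsigned int size;   /* updated by av_fast_malloc() */
} ScratchBuf;

/* Grow b->data to at least `needed` bytes, reusing it when large enough. */
static int ensure_scratch(ScratchBuf *b, size_t needed)
{
    av_fast_malloc(&b->data, &b->size, needed);
    return b->data ? 0 : AVERROR(ENOMEM);
}
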
SliceHeader::pps_id
unsigned int pps_id
id of the PPS referenced by this slice header
Definition: hevc_ps.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
bytestream.h
ff_hevc_decode_short_term_rps
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
Definition: hevc_ps.c:119
PredictionUnit::mpm_idx
int mpm_idx
Definition: hevcdec.h:276
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
HEVC_NAL_FD_NUT
@ HEVC_NAL_FD_NUT
Definition: hevc.h:67
PredictionUnit::chroma_mode_c
uint8_t chroma_mode_c[4]
Definition: hevcdec.h:282
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
skip_bytes
static const av_unused uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
Definition: cabac_functions.h:197
PredictionUnit::intra_pred_mode
uint8_t intra_pred_mode[4]
Definition: hevcdec.h:278
ff_hevc_decode_nal_pps
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:1473
TransformUnit::is_cu_chroma_qp_offset_coded
uint8_t is_cu_chroma_qp_offset_coded
Definition: hevcdec.h:295
hls_transform_tree
static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
Definition: hevcdec.c:1258
h
h
Definition: vp9dsp_template.c:2038
BOUNDARY_LEFT_SLICE
#define BOUNDARY_LEFT_SLICE
Definition: hevcdec.h:374
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
SliceHeader::slice_qp_delta
int slice_qp_delta
Definition: hevc_ps.h:90
SliceHeader::slice_addr
unsigned int slice_addr
Definition: hevc_ps.h:54
ff_hevc_decoder
AVCodec ff_hevc_decoder
Definition: hevcdec.c:3562
ff_hevc_log2_res_scale_abs
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
Definition: hevc_cabac.c:896
HEVC_NAL_EOB_NUT
@ HEVC_NAL_EOB_NUT
Definition: hevc.h:66
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
TransformUnit::intra_pred_mode_c
int intra_pred_mode_c
Definition: hevcdec.h:292
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:807
HEVC_NAL_SEI_PREFIX
@ HEVC_NAL_SEI_PREFIX
Definition: hevc.h:68
int
int
Definition: ffmpeg_filter.c:191
HEVCLocalContext::end_of_tiles_y
int end_of_tiles_y
Definition: hevcdec.h:362
CodingUnit::intra_split_flag
uint8_t intra_split_flag
IntraSplitFlag.
Definition: hevcdec.h:250
ff_hevc_end_of_slice_flag_decode
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:608
intra_prediction_unit
static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2023
SHIFT_CTB_WPP
#define SHIFT_CTB_WPP
Definition: hevcdec.h:46
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2891
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwaccel.h:75
luma_mc_uni
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
Definition: hevcdec.c:1438
PART_2NxN
@ PART_2NxN
Definition: hevcdec.h:145
HEVCParamSets::vps_list
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
Definition: hevc_ps.h:400
SliceHeader::long_term_rps
LongTermRPS long_term_rps
Definition: hevc_ps.h:71
HEVC_MAX_PPS_COUNT
@ HEVC_MAX_PPS_COUNT
Definition: hevc.h:114
HEVCLocalContext::cc
CABACContext cc
Definition: hevcdec.h:348
TransformUnit::cu_qp_offset_cr
int8_t cu_qp_offset_cr
Definition: hevcdec.h:297
ff_alloc_entries
int ff_alloc_entries(AVCodecContext *avctx, int count)
Definition: pthread_slice.c:201
options
static const AVOption options[]
Definition: hevcdec.c:3547
HEVC_CONTEXTS
#define HEVC_CONTEXTS
Definition: hevcdec.h:55
HEVCParamSets
Definition: hevc_ps.h:399
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwaccel.h:69