FFmpeg
vp9_superframe_split_bsf.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * This bitstream filter splits VP9 superframes into packets containing
 * just one frame.
 */
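/*
 * A minimal usage sketch, assuming the filter is driven through the public
 * av_bsf API like any other bitstream filter; `st` (an AVStream) and `pkt`
 * are illustrative placeholders and error checks are omitted for brevity:
 *
 * @code
 * const AVBitStreamFilter *f = av_bsf_get_by_name("vp9_superframe_split");
 * AVBSFContext *bsf = NULL;
 * AVPacket *sub = av_packet_alloc();
 *
 * av_bsf_alloc(f, &bsf);
 * avcodec_parameters_copy(bsf->par_in, st->codecpar);
 * bsf->time_base_in = st->time_base;
 * av_bsf_init(bsf);
 *
 * av_bsf_send_packet(bsf, pkt);                  // one (possibly super-) frame in
 * while (av_bsf_receive_packet(bsf, sub) == 0) { // one or more single frames out
 *     // consume sub here, then drop the reference
 *     av_packet_unref(sub);
 * }
 * @endcode
 */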

#include <stddef.h>

#include "avcodec.h"
#include "bsf.h"
#include "bytestream.h"
#include "get_bits.h"

typedef struct VP9SFSplitContext {
    AVPacket *buffer_pkt;     // superframe currently being split, if any

    int nb_frames;            // number of frames in the buffered superframe
    int next_frame;           // index of the next frame to be output
    size_t next_frame_offset; // byte offset of that frame inside the superframe
    int sizes[8];             // per-frame sizes parsed from the superframe index
} VP9SFSplitContext;

static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out)
{
    VP9SFSplitContext *s = ctx->priv_data;
    AVPacket *in;
    int i, j, ret, marker;
    int is_superframe = !!s->buffer_pkt->data;

    if (!s->buffer_pkt->data) {
        ret = ff_bsf_get_packet_ref(ctx, s->buffer_pkt);
        if (ret < 0)
            return ret;
        in = s->buffer_pkt;

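        /*
         * VP9 superframe layout (per the VP9 bitstream spec): the coded
         * frames are concatenated and followed by an index of the form
         *
         *   marker | size_0 .. size_{N-1} | marker
         *
         * where the marker byte is 0b110xxyyy (xx = size-field width in
         * bytes minus one, yyy = number of frames minus one) and each size
         * field is little-endian. The last byte of the packet is therefore
         * enough to detect a superframe and to size the index.
         */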
        marker = in->data[in->size - 1];
        if ((marker & 0xe0) == 0xc0) {
            int length_size = 1 + ((marker >> 3) & 0x3);
            int   nb_frames = 1 + (marker & 0x7);
            int    idx_size = 2 + nb_frames * length_size;

            if (in->size >= idx_size && in->data[in->size - idx_size] == marker) {
                GetByteContext bc;
                int64_t total_size = 0;

                bytestream2_init(&bc, in->data + in->size + 1 - idx_size,
                                 nb_frames * length_size);

                for (i = 0; i < nb_frames; i++) {
                    int frame_size = 0;
                    for (j = 0; j < length_size; j++)
                        frame_size |= bytestream2_get_byte(&bc) << (j * 8);

                    total_size += frame_size;
                    if (frame_size < 0 || total_size > in->size - idx_size) {
                        av_log(ctx, AV_LOG_ERROR,
                               "Invalid frame size in a superframe: %d\n", frame_size);
                        ret = AVERROR(EINVAL);
                        goto fail;
                    }
                    s->sizes[i] = frame_size;
                }
                s->nb_frames         = nb_frames;
                s->next_frame        = 0;
                s->next_frame_offset = 0;
                is_superframe        = 1;
            }
        }
    }

    if (is_superframe) {
        GetBitContext gb;
        int profile, invisible = 0;

        ret = av_packet_ref(out, s->buffer_pkt);
        if (ret < 0)
            goto fail;

        // output a reference to just the next sub-frame of the superframe
        out->data += s->next_frame_offset;
        out->size  = s->sizes[s->next_frame];

        s->next_frame_offset += out->size;
        s->next_frame++;

        // release the buffered superframe once its last frame has been emitted
        if (s->next_frame >= s->nb_frames)
            av_packet_unref(s->buffer_pkt);

        ret = init_get_bits8(&gb, out->data, out->size);
        if (ret < 0)
            goto fail;

        // parse the start of the uncompressed VP9 frame header to detect
        // invisible (not-shown) frames, which must not carry a timestamp
        get_bits(&gb, 2);                // frame_marker
        profile  = get_bits1(&gb);
        profile |= get_bits1(&gb) << 1;
        if (profile == 3)
            get_bits1(&gb);              // reserved bit for profile 3
        if (!get_bits1(&gb)) {           // show_existing_frame == 0
            get_bits1(&gb);              // frame_type
            invisible = !get_bits1(&gb); // show_frame
        }

        if (invisible)
            out->pts = AV_NOPTS_VALUE;

    } else {
        // not a superframe: pass the buffered packet through unchanged
        av_packet_move_ref(out, s->buffer_pkt);
    }

    return 0;
fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_unref(s->buffer_pkt);
    return ret;
}

static int vp9_superframe_split_init(AVBSFContext *ctx)
{
    VP9SFSplitContext *s = ctx->priv_data;

    s->buffer_pkt = av_packet_alloc();
    if (!s->buffer_pkt)
        return AVERROR(ENOMEM);

    return 0;
}

static void vp9_superframe_split_flush(AVBSFContext *ctx)
{
    VP9SFSplitContext *s = ctx->priv_data;
    av_packet_unref(s->buffer_pkt);
}

static void vp9_superframe_split_uninit(AVBSFContext *ctx)
{
    VP9SFSplitContext *s = ctx->priv_data;
    av_packet_free(&s->buffer_pkt);
}

const AVBitStreamFilter ff_vp9_superframe_split_bsf = {
    .name           = "vp9_superframe_split",
    .priv_data_size = sizeof(VP9SFSplitContext),
    .init           = vp9_superframe_split_init,
    .flush          = vp9_superframe_split_flush,
    .close          = vp9_superframe_split_uninit,
    .filter         = vp9_superframe_split_filter,
    .codec_ids      = (const enum AVCodecID []){ AV_CODEC_ID_VP9, AV_CODEC_ID_NONE },
};
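
/*
 * A quick way to run real packets through this filter from the command line
 * (a sketch; the split packets, with pts unset on invisible frames, are
 * mainly intended for feeding decoders rather than for remuxing):
 *
 *   ffmpeg -i input.webm -c:v copy -bsf:v vp9_superframe_split -f null -
 */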