mpeg4_unpack_bframes_bsf.c
/*
 * Bitstream filter for unpacking DivX-style packed B-frames in MPEG-4 (divx_packed)
 * Copyright (c) 2015 Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bsf.h"
#include "internal.h"
#include "mpeg4video.h"

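/*
 * "Packed bitstream" files store a delayed B-frame VOP in the same packet
 * as the P-frame it follows and mark the gap it left with a tiny
 * placeholder packet (an "N-VOP").  This filter splits such packets: the
 * second VOP is kept in the context below and emitted in place of the
 * next N-VOP, and the trailing 'p' in the DivX userdata string is cleared
 * so downstream decoders no longer expect packed B-frames.
 */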
typedef struct UnpackBFramesBSFContext {
    AVPacket *b_frame;
} UnpackBFramesBSFContext;

/* determine the position of the packed marker in the userdata,
 * the number of VOPs and the position of the second VOP */
static void scan_buffer(const uint8_t *buf, int buf_size,
                        int *pos_p, int *nb_vop, int *pos_vop2) {
    uint32_t startcode;
    const uint8_t *end = buf + buf_size, *pos = buf;

    while (pos < end) {
        startcode = -1;
        pos = avpriv_find_start_code(pos, end, &startcode);

        if (startcode == USER_DATA_STARTCODE && pos_p) {
            /* check if the (DivX) userdata string ends with 'p' (packed) */
            for (int i = 0; i < 255 && pos + i + 1 < end; i++) {
                if (pos[i] == 'p' && pos[i + 1] == '\0') {
                    *pos_p = pos + i - buf;
                    break;
                }
            }
        } else if (startcode == VOP_STARTCODE && nb_vop) {
            *nb_vop += 1;
            if (*nb_vop == 2 && pos_vop2) {
                *pos_vop2 = pos - buf - 4; /* subtract 4 bytes startcode */
            }
        }
    }
}

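/*
 * One input packet in, one output packet out:
 *  - two VOPs in the packet: pass the first on, keep the second (the
 *    packed B-frame) in s->b_frame,
 *  - a single VOP while a B-frame is pending: emit the stored B-frame;
 *    drop the input if it is a small N-VOP placeholder, otherwise buffer it,
 *  - otherwise: pass the packet through, clearing the userdata 'p' marker
 *    if one was found.
 */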
static int mpeg4_unpack_bframes_filter(AVBSFContext *ctx, AVPacket *out)
{
    UnpackBFramesBSFContext *s = ctx->priv_data;
    int pos_p = -1, nb_vop = 0, pos_vop2 = -1, ret = 0;
    AVPacket *in;

    ret = ff_bsf_get_packet(ctx, &in);
    if (ret < 0)
        return ret;

    scan_buffer(in->data, in->size, &pos_p, &nb_vop, &pos_vop2);
    av_log(ctx, AV_LOG_DEBUG, "Found %d VOP startcode(s) in this packet.\n", nb_vop);

    if (pos_vop2 >= 0) {
73  if (s->b_frame->data) {
75  "Missing one N-VOP packet, discarding one B-frame.\n");
77  }
        /* store the packed B-frame in the BSFContext */
        ret = av_packet_ref(s->b_frame, in);
        if (ret < 0) {
            goto fail;
        }
        s->b_frame->size -= pos_vop2;
        s->b_frame->data += pos_vop2;
    }

    if (nb_vop > 2) {
        av_log(ctx, AV_LOG_WARNING,
               "Found %d VOP headers in one packet, only unpacking one.\n", nb_vop);
    }

    if (nb_vop == 1 && s->b_frame->data) {
        /* use frame from BSFContext */
        av_packet_move_ref(out, s->b_frame);

        /* use properties from current input packet */
        ret = av_packet_copy_props(out, in);
        if (ret < 0) {
            goto fail;
        }

        if (in->size <= MAX_NVOP_SIZE) {
            /* N-VOP */
            av_log(ctx, AV_LOG_DEBUG, "Skipping N-VOP.\n");
        } else {
            /* copy packet into BSFContext */
            av_packet_move_ref(s->b_frame, in);
        }
    } else if (nb_vop >= 2) {
        /* use first frame of the packet */
        av_packet_move_ref(out, in);
        out->size = pos_vop2;
    } else if (pos_p >= 0) {
        ret = av_packet_make_writable(in);
        if (ret < 0)
            goto fail;
        av_log(ctx, AV_LOG_DEBUG, "Updating DivX userdata (remove trailing 'p').\n");
        av_packet_move_ref(out, in);
        /* remove 'p' (packed) from the end of the (DivX) userdata string */
        out->data[pos_p] = '\0';
    } else {
        /* copy packet */
        av_packet_move_ref(out, in);
    }

fail:
    if (ret < 0)
        av_packet_unref(out);
    av_packet_free(&in);

    return ret;
}

static int mpeg4_unpack_bframes_init(AVBSFContext *ctx)
{
    UnpackBFramesBSFContext *s = ctx->priv_data;

    s->b_frame = av_packet_alloc();
    if (!s->b_frame)
        return AVERROR(ENOMEM);

    if (ctx->par_in->extradata) {
        int pos_p_ext = -1;
        scan_buffer(ctx->par_in->extradata, ctx->par_in->extradata_size, &pos_p_ext, NULL, NULL);
        if (pos_p_ext >= 0) {
            av_log(ctx, AV_LOG_DEBUG,
                   "Updating DivX userdata (remove trailing 'p') in extradata.\n");
            ctx->par_out->extradata[pos_p_ext] = '\0';
        }
    }

    return 0;
}

static void mpeg4_unpack_bframes_flush(AVBSFContext *bsfc)
{
    UnpackBFramesBSFContext *ctx = bsfc->priv_data;
    av_packet_unref(ctx->b_frame);
}

static void mpeg4_unpack_bframes_close(AVBSFContext *bsfc)
{
    UnpackBFramesBSFContext *ctx = bsfc->priv_data;
    av_packet_free(&ctx->b_frame);
}

static const enum AVCodecID codec_ids[] = {
    AV_CODEC_ID_MPEG4, AV_CODEC_ID_NONE,
};

const AVBitStreamFilter ff_mpeg4_unpack_bframes_bsf = {
    .name           = "mpeg4_unpack_bframes",
    .priv_data_size = sizeof(UnpackBFramesBSFContext),
    .init           = mpeg4_unpack_bframes_init,
    .filter         = mpeg4_unpack_bframes_filter,
    .flush          = mpeg4_unpack_bframes_flush,
    .close          = mpeg4_unpack_bframes_close,
    .codec_ids      = codec_ids,
};
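
Usage sketch (not part of the file above, and only one plausible way to drive it): the filter is registered under the name "mpeg4_unpack_bframes" and is normally used through the public av_bsf_* API. In the snippet below, unpack_bframes_example, st (the MPEG-4 video AVStream of an already-opened demuxer) and the handle() callback are illustrative placeholders; error handling is kept minimal.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Push one demuxed packet through mpeg4_unpack_bframes and hand every
 * unpacked packet to a caller-supplied callback. */
static int unpack_bframes_example(const AVStream *st, AVPacket *pkt,
                                  void (*handle)(const AVPacket *))
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("mpeg4_unpack_bframes");
    AVBSFContext *bsf = NULL;
    AVPacket *out = av_packet_alloc();
    int ret;

    if (!f || !out) {
        av_packet_free(&out);
        return AVERROR(ENOMEM);
    }

    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        goto end;

    /* the filter's init() inspects par_in->extradata, so the input
     * parameters must be filled in before av_bsf_init() */
    if ((ret = avcodec_parameters_copy(bsf->par_in, st->codecpar)) < 0)
        goto end;
    bsf->time_base_in = st->time_base;

    if ((ret = av_bsf_init(bsf)) < 0)
        goto end;

    /* av_bsf_send_packet() takes ownership of the packet reference */
    if ((ret = av_bsf_send_packet(bsf, pkt)) < 0)
        goto end;

    while ((ret = av_bsf_receive_packet(bsf, out)) == 0) {
        handle(out);
        av_packet_unref(out);
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0; /* the filter simply needs more input */

end:
    av_bsf_free(&bsf);
    av_packet_free(&out);
    return ret;
}

The same filter can also be requested during stream copy from the ffmpeg command line, for example: ffmpeg -i in.avi -c:v copy -bsf:v mpeg4_unpack_bframes out.mkv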