[FFmpeg-devel] [PATCH] examples/muxing: prefer AVPicture to AVFrame, when feasible

Clément Bœsch ubitux at gmail.com
Tue Sep 11 18:51:06 CEST 2012


On Tue, Sep 11, 2012 at 06:19:17PM +0200, Stefano Sabatini wrote:
> Favor the use of plain AVPicture over AVFrame, especially when the use is
> not required like in the case of tmp_picture.
> 
> Also adopt more straightforward names, to avoid frame/picture confusion.
> ---
>  doc/examples/muxing.c |   57 ++++++++++++++++++++++++-------------------------
>  1 files changed, 28 insertions(+), 29 deletions(-)
> 
> diff --git a/doc/examples/muxing.c b/doc/examples/muxing.c
> index a91be44..2bf141c 100644
> --- a/doc/examples/muxing.c
> +++ b/doc/examples/muxing.c
> @@ -213,20 +213,14 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
>  /**************************************************************/
>  /* video output */
>  
> -static AVFrame *picture, *tmp_picture;
> +static AVFrame *frame;
> +static AVPicture src_picture, dst_picture;
>  static uint8_t *video_outbuf;
>  static int frame_count, video_outbuf_size;
>  
> -static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
> -{
> -    AVFrame *picture = avcodec_alloc_frame();
> -    if (!picture || avpicture_alloc((AVPicture *)picture, pix_fmt, width, height) < 0)
> -        av_freep(&picture);
> -    return picture;
> -}
> -
>  static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
>  {
> +    int ret;
>      AVCodecContext *c;
>  
>      c = st->codec;
> @@ -249,9 +243,15 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
>          video_outbuf      = av_malloc(video_outbuf_size);
>      }
>  
> +    frame = avcodec_alloc_frame();
> +    if (!frame) {
> +        fprintf(stderr, "Could not allocate video frame\n");
> +        exit(1);

Note: I know we do that in all the examples, but I believe most users
will try to exit their app cleanly, like we do. At some point it would
be nice to show how to quit from any state. As a developer, this is
something we generally care about ("can I call this destructor function
even if the pointer is NULL or if the context is in a bad state?").
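
Something like this sketch is what I have in mind (the helper name is
mine, not part of the patch): every call in it accepts a NULL / zeroed
argument, so it can be run from any intermediate state:

    #include <libavcodec/avcodec.h>
    #include <libavutil/mem.h>

    /* Hypothetical cleanup helper: safe to run no matter which of the
     * allocations above succeeded, since avpicture_free() and
     * av_freep() both accept NULL / zero-initialized arguments. */
    static void free_video_buffers(AVPicture *src, AVPicture *dst,
                                   AVFrame **frame, uint8_t **outbuf)
    {
        avpicture_free(src);   /* av_free(NULL) inside is a no-op */
        avpicture_free(dst);
        av_freep(frame);       /* also resets *frame to NULL */
        av_freep(outbuf);
    }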

> +    }
> +

nit: would it make sense to add a comment above this chunk, such as
"Allocate and init a re-usable frame"? That might keep users from
allocating a new frame for each picture (the example doesn't make that
explicit).
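
Something along these lines, i.e. the same chunk with the comment I am
suggesting:

    /* Allocate and init a re-usable frame: it is refilled with a new
     * picture on every write_video_frame() call, so there is no need
     * to allocate a fresh AVFrame per picture. */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }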

>      /* Allocate the encoded raw picture. */
> -    picture = alloc_picture(c->pix_fmt, c->width, c->height);
> -    if (!picture) {
> +    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
> +    if (ret < 0) {
>          fprintf(stderr, "Could not allocate picture\n");
>          exit(1);
>      }
> @@ -259,18 +259,20 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
>      /* If the output format is not YUV420P, then a temporary YUV420P
>       * picture is needed too. It is then converted to the required
>       * output format. */
> -    tmp_picture = NULL;
>      if (c->pix_fmt != PIX_FMT_YUV420P) {
> -        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
> -        if (!tmp_picture) {
> +        ret = avpicture_alloc(&src_picture, PIX_FMT_YUV420P, c->width, c->height);
> +        if (ret < 0) {
>              fprintf(stderr, "Could not allocate temporary picture\n");
>              exit(1);
>          }
>      }
> +
> +    /* copy picture pointers to the frame */
> +    *((AVPicture *)frame) = dst_picture;

I wasn't aware the API worked like this; is it considered a hack, or is
this the way we are expected to deal with the API?

Would it be possible to extend the comment above a bit? (for example,
explaining which fields the two structs have in common and that they
will be kept that way)
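
For reference, my (possibly wrong) reading of it: AVFrame starts with
the same data[]/linesize[] members as AVPicture, so the assignment
through the cast copies exactly those two arrays and nothing else,
i.e. it is equivalent to:

    #include <string.h>
    #include <libavcodec/avcodec.h>

    /* Illustration only, not part of the patch: copy the plane
     * pointers and strides of an AVPicture into the leading,
     * layout-compatible members of an AVFrame. */
    static void picture_to_frame(AVFrame *frame, const AVPicture *pic)
    {
        memcpy(frame->data,     pic->data,     sizeof(pic->data));
        memcpy(frame->linesize, pic->linesize, sizeof(pic->linesize));
    }

If that's correct, spelling it out in the comment would already answer
my question.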

>  }
>  
>  /* Prepare a dummy image. */
> -static void fill_yuv_image(AVFrame *pict, int frame_index,
> +static void fill_yuv_image(AVPicture *pict, int frame_index,
>                             int width, int height)
>  {
>      int x, y, i;
> @@ -319,12 +321,12 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
>                      exit(1);
>                  }
>              }
> -            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
> +            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
>              sws_scale(img_convert_ctx,
> -                      (const uint8_t * const *)tmp_picture->data, tmp_picture->linesize,
> -                      0, c->height, picture->data, picture->linesize);
> +                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
> +                      0, c->height, dst_picture.data, dst_picture.linesize);
>          } else {
> -            fill_yuv_image(picture, frame_count, c->width, c->height);
> +            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
>          }
>      }
>  
> @@ -336,7 +338,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
>  
>          pkt.flags        |= AV_PKT_FLAG_KEY;
>          pkt.stream_index  = st->index;
> -        pkt.data          = (uint8_t *)picture;
> +        pkt.data          = dst_picture.data[0];
>          pkt.size          = sizeof(AVPicture);
>  
>          ret = av_interleaved_write_frame(oc, &pkt);
> @@ -349,7 +351,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
>          pkt.data = NULL;    // packet data will be allocated by the encoder
>          pkt.size = 0;
>  
> -        ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
> +        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
>          if (ret < 0) {
>              fprintf(stderr, "error encoding frame\n");
>              exit(1);
> @@ -381,12 +383,9 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
>  static void close_video(AVFormatContext *oc, AVStream *st)
>  {
>      avcodec_close(st->codec);
> -    av_free(picture->data[0]);
> -    av_free(picture);
> -    if (tmp_picture) {
> -        av_free(tmp_picture->data[0]);
> -        av_free(tmp_picture);
> -    }
> +    av_free(src_picture.data[0]);
> +    av_free(dst_picture.data[0]);
> +    av_free(frame);
>      av_free(video_outbuf);
>  }
>  
> @@ -460,7 +459,7 @@ int main(int argc, char **argv)
>      /* Write the stream header, if any. */
>      avformat_write_header(oc, NULL);
>  
> -    picture->pts = 0;
> +    frame->pts = 0;
>      for (;;) {
>          /* Compute current audio and video time. */
>          if (audio_st)
> @@ -483,7 +482,7 @@ int main(int argc, char **argv)
>              write_audio_frame(oc, audio_st);
>          } else {
>              write_video_frame(oc, video_st);
> -            picture->pts++;
> +            frame->pts++;
>          }
>      }

Anyway, it looks good to me, but I'm not familiar with that code.

-- 
Clément B.