[Libav-user] x264 encoding crashes on av_interleaved_write_frame

Christian Brümmer christian.bruemmer at gmx.de
Wed May 23 14:49:23 CEST 2012


Hi,

im new to libav and video compression at all. Im working on Windows 7 in 
Visual Studio 2010 and i try to simply encode a dummy image to x264 
using an actual Zeranoe-Build (x86).

First i tried to use the output-example which doesnt work with x264 out 
of the box. I put many code snippets together and often i really dont 
know what the code does - so my error might be anywhere.

So its saidly a lot of code but the main problem existing in 
*write_video_frame* where the program crashes on 
*av_interleaved_write_frame* (so you may have a look on that)! Encoding 
works so far (as far as i can say -> no errors shown)- takes me a lot of 
time and i got help from zeranoe forum (thread: 
http://ffmpeg.zeranoe.com/forum/viewtopic.php?f=15&t=534&p=1581#p1581).

I really need ffmpeg / libav for x264 encoding in my project @ 
university and i dont know what's wrong and to do :/!



#include <iostream>
#include <string>

#include <stdlib.h>
#include <stdio.h>


extern "C" {
         #include <avcodec.h>
         #include <avformat.h>
         #include <swscale.h>
         #include <avio.h>
         #include <opt.h>
         #include "libavutil/imgutils.h"
}

#define WIDTH 800
#define HEIGHT 480
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * FRAME_RATE))
#define FRAME_RATE 25
#define PIXEL_FORMAT PIX_FMT_YUV420P
#define STREAM_DURATION 1 //seconds
#define BIT_RATE 400000

static int sws_flags = SWS_BICUBIC;

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;

static void closeVideo(AVFormatContext *oc, AVStream *st)
{
     avcodec_close(st->codec);
     av_free(picture->data[0]);
     av_free(picture);
     if (tmp_picture)
     {
         av_free(tmp_picture->data[0]);
         av_free(tmp_picture);
     }
     av_free(video_outbuf);
}

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int 
height)
{
     AVFrame *picture;
     uint8_t *picture_buf;
     int size;

     picture = avcodec_alloc_frame();
     if(!picture)
         return NULL;
     size = avpicture_get_size(pix_fmt, width, height);
     picture_buf = (uint8_t*)(av_malloc(size));
     if (!picture_buf)
     {
         av_free(picture);
         return NULL;
     }
     avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, WIDTH, 
HEIGHT);
     return picture;
}

static void openVideo(AVFormatContext *oc, AVStream *st)
{
     AVCodec *codec;
     AVCodecContext *c;

     c = st->codec;
     //if(c->idct_algo == CodecID::CODEC_ID_H264)
     //    av_opt_set(c->priv_data, "preset", "slow", 0);

     codec = avcodec_find_encoder(c->codec_id);
     if(!codec)
     {
         std::cout << "Codec not found." << std::endl;
         std::cin.get();std::cin.get();exit(1);
     }

     if(avcodec_get_context_defaults3 (c, codec) < 0)
     {
         std::cout << "Cannot get default codec context! \n" << std::endl;
         std::cin.get();
         exit(1);
     }
     c->bit_rate = BIT_RATE;
     c->width = WIDTH;
     c->height = HEIGHT;
     c->time_base.den = FRAME_RATE;
     c->time_base.num = 1;
     c->gop_size = FRAME_RATE;
     c->pix_fmt = PIX_FMT_YUV420P;

     if(oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;

     if(avcodec_open2(c, codec, NULL) < 0)
     {
         std::cout << "Could not open codec." << std::endl;
         std::cin.get();std::cin.get();exit(1);
     }
     video_outbuf = NULL;
     if(!(oc->oformat->flags & AVFMT_RAWPICTURE))
     {
         video_outbuf_size = 200000;
         video_outbuf = (uint8_t*)(av_malloc(video_outbuf_size));
     }
     picture = alloc_picture(c->pix_fmt, c->width, c->height);
     if(!picture)
     {
         std::cout << "Could not allocate picture" << std::endl;
         std::cin.get();exit(1);
     }
     tmp_picture = NULL;
     if(c->pix_fmt != PIX_FMT_YUV420P)
     {
         tmp_picture = alloc_picture(PIX_FMT_YUV420P, WIDTH, HEIGHT);
         if(!tmp_picture)
         {
             std::cout << " Could not allocate temporary picture" << 
std::endl;
             std::cin.get();exit(1);
         }
     }
}


static AVStream* addVideoStream(AVFormatContext *context, enum CodecID 
codecID)
{
     AVCodecContext *codec;
     AVStream *stream;
     stream = av_new_stream(context, 0);
     if(!stream)
     {
         std::cout << "Could not alloc stream." << std::endl;
         std::cin.get();exit(1);
     }

     codec = stream->codec;
     codec->codec_id = codecID;
     codec->codec_type = AVMEDIA_TYPE_VIDEO;

     // sample rate
     codec->bit_rate = BIT_RATE;
     // resolution must be a multiple of two
     codec->width = WIDTH;
     codec->height = HEIGHT;
     codec->time_base.den = FRAME_RATE; // stream fps
     codec->time_base.num = 1;
     codec->gop_size = FRAME_RATE; // intra frame every twelve frames at 
most
     codec->pix_fmt = PIXEL_FORMAT;
     if(codec->codec_id == CODEC_ID_MPEG2VIDEO)
         codec->max_b_frames = 2; // for testing, B frames

     if(codec->codec_id == CODEC_ID_MPEG1VIDEO)
         codec->mb_decision = 2;

     if(context->oformat->flags & AVFMT_GLOBALHEADER)
         codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

     return stream;
}

static void fill_yuv_image(AVFrame *pict, int frame_index, int width, 
int height)
{
     int x, y, i;
     i = frame_index;

     /* Y */
     for(y=0;y<height;y++) {
         for(x=0;x<width;x++) {
             pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
         }
     }

     /* Cb and Cr */
     for(y=0;y<height/2;y++) {
         for(x=0;x<width/2;x++) {
             pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
             pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
         }
     }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
     int out_size, ret;
     AVCodecContext *c;
     static struct SwsContext *img_convert_ctx;
     c = st->codec;

     if(frame_count >= STREAM_NB_FRAMES)
     {

     }
     else
     {
         if(c->pix_fmt != PIX_FMT_YUV420P)
         {
             if(img_convert_ctx = NULL)
             {
                 img_convert_ctx = sws_getContext(WIDTH, HEIGHT, 
PIX_FMT_YUV420P, WIDTH, HEIGHT,
                                                 c->pix_fmt, sws_flags, 
NULL, NULL, NULL);
                 if(img_convert_ctx == NULL)
                 {
                     std::cout << "Cannot initialize the conversion 
context" << std::endl;
                     std::cin.get();exit(1);
                 }
             }
             fill_yuv_image(tmp_picture, frame_count, WIDTH, HEIGHT);
             sws_scale(img_convert_ctx, tmp_picture->data, 
tmp_picture->linesize, 0, HEIGHT,
                         picture->data, picture->linesize);
         }
         else
         {
             fill_yuv_image(picture, frame_count, WIDTH, HEIGHT);
         }
     }

     if (oc->oformat->flags & AVFMT_RAWPICTURE) {
         /* raw video case. The API will change slightly in the near
            futur for that */
         AVPacket pkt;
         av_init_packet(&pkt);

         pkt.flags |= AV_PKT_FLAG_KEY;
         pkt.stream_index= st->index;
         pkt.data= (uint8_t *)picture;
         pkt.size= sizeof(AVPicture);


         ret = av_interleaved_write_frame(oc, &pkt);
     } else {
         /* encode the image */
         picture->pts = (float) frame_count * 
(1000.0/(float)(FRAME_RATE)) * 90;
         int result, result2;
         AVPacket pkt;
         av_init_packet(&pkt);
         pkt.dts = AV_NOPTS_VALUE;
         pkt.stream_index = st->index;
         pkt.data = video_outbuf;
         pkt.size = video_outbuf_size;
         pkt.duration = 0;
         result2 = avcodec_encode_video2(c, &pkt, picture, &result);
         if (result == 1 && result2 == 0) {
             ret = av_interleaved_write_frame(oc, &pkt); // CRASH !!!
         } else {
             ret = 0;
         }
     }
     if (ret != 0) {
         std::cout << "Error while writing video frames" << std::endl;
         std::cin.get();exit(1);
     }
     frame_count++;
}

int main(int argc, char** argv)
{
     const char* filename = "test.h264";
     AVOutputFormat *outputFormat;
     AVFormatContext *context;
     AVCodecContext *codec;
     AVStream *videoStream;
     double videoPTS;

     // init libavcodec, register all codecs and formats
     av_register_all();
     avcodec_register_all();
     // auto detect the output format from the name
     outputFormat = av_guess_format(NULL, filename, NULL);
     if(!outputFormat)
     {
         std::cout << "Cannot guess output format! Using mpeg!" << 
std::endl;
         std::cin.get();
         outputFormat = av_guess_format(NULL, "h263" , NULL);
     }
     if(!outputFormat)
     {
         std::cout << "Could not find suitable output format." << std::endl;
         std::cin.get();exit(1);
     }

     context = avformat_alloc_context();
     if(!context)
     {
         std::cout << "Cannot allocate avformat memory." << std::endl;
         std::cin.get();exit(1);
     }
     context->oformat = outputFormat;
     sprintf_s(context->filename, sizeof(context->filename), "%s", 
filename);
     std::cout << "Is '" << context->filename << "' = '" << filename << 
"'" << std::endl;

     videoStream = NULL;
     outputFormat->audio_codec = CODEC_ID_NONE;
     videoStream = addVideoStream(context, outputFormat->video_codec);

     /* still needed?
     if(av_set_parameters(context, NULL) < 0)
     {
         std::cout << "Invalid output format parameters." << std::endl;
         exit(0);
     }*/

     av_dump_format(context, 0, filename, 1);

     if(videoStream)
         openVideo(context, videoStream);

     if(!outputFormat->flags & AVFMT_NOFILE)
     {
         if(avio_open(&context->pb, filename, AVIO_FLAG_READ_WRITE) < 0)
         {
             std::cout << "Could not open " << filename << std::endl;
             std::cin.get();exit(1);
         }
     }

     avformat_write_header(context, 0);

     while(true)
     {
         if(videoStream)
             videoPTS = (double) videoStream->pts.val * 
videoStream->time_base.num / videoStream->time_base.den;
         else
             videoPTS = 0.;

         if((!videoStream || videoPTS >= STREAM_DURATION))
         {
             break;
         }
         write_video_frame(context, videoStream);
     }
     av_write_trailer(context);
     if(videoStream)
         closeVideo(context, videoStream);
     for(int i = 0; i < context->nb_streams; i++)
     {
         av_freep(&context->streams[i]->codec);
         av_freep(&context->streams[i]);
     }

     if(!(outputFormat->flags & AVFMT_NOFILE))
     {
         avio_close(context->pb);
     }
     av_free(context);
     std::cin.get();
     return 0;
};
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://ffmpeg.org/pipermail/libav-user/attachments/20120523/99a8ac2f/attachment.html>


More information about the Libav-user mailing list