[Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

Harald Jordan harald.jordan at redstream.at
Sat Feb 9 20:58:42 CET 2013


I prefer using the muxing example for playing around with encoding stuff. Here
is a version that I slightly altered especially for you. Additionally I attach
it as a text file (don't know if this works).

As you can see in the include section, it is a Visual Studio (2010) C++
project. It is tested only to encode h264+aac and to flush the video encoder;
this worked. The part that allows setting the audio_codec_string manually is
not implemented yet, it is only just begun.

Using ffmpeg-20120810-git-633b90c-win64-dev
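
The manual codec-string part could eventually look roughly like this - only a
sketch, not part of the attached file, and the error handling is just an
example; it uses avcodec_find_encoder_by_name():

    AVCodec *vcodec = avcodec_find_encoder_by_name(video_codec_string); /* e.g. "libx264" */
    AVCodec *acodec = avcodec_find_encoder_by_name(audio_codec_string); /* e.g. "pcm_s16le" */
    if (!vcodec || !acodec) {
        fprintf(stderr, "requested encoder not found\n");
        exit(1);
    }
    /* then pass vcodec->id / acodec->id to add_video_stream() / add_audio_stream()
     * instead of fmt->video_codec / fmt->audio_codec */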

 

/*
 * Libavformat API example: Output a media file in any supported
 * libavformat format. The default codecs are used.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "stdafx.h"

#include "inttypes.h"

#include <stdlib.h>

#include <stdio.h>

#include <string.h>

#include <math.h>

 

extern "C" {       

#include <avcodec.h>        

#include <avformat.h>        

#include <swscale.h>

#include <avcodec.h>

#include <libswresample\swresample.h>

#include <include\libavutil\samplefmt.h>

#include <libavutil/audioconvert.h>

 

}

 

#undef exit

 

 

 

const char * audio_codec_string = "pcm_s16le"; //set audio and video codec
manually, 

const char * video_codec_string = "libx264";

    const char *filename="c:\\out.mov"; //format is deduced from file
extension

 

/* 5 seconds stream duration */

#define STREAM_DURATION   200.0

#define STREAM_FRAME_RATE 25 /* 25 images/s */

#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))

#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */

 

static int sws_flags = SWS_BICUBIC;

 

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(130);
    }

    c = st->codec;
    c->codec_id   = codec_id;
    c->codec_type = AVMEDIA_TYPE_AUDIO;
    c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; /* for aac */

    /* put sample parameters */
    AVCodec *out_codec = avcodec_find_encoder(codec_id);
    c->sample_fmt  = out_codec->sample_fmts[0];
    c->bit_rate    = 64000;
    c->sample_rate = 22050;
    c->channels    = 1;

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

 

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;
    c->codec = avcodec_find_encoder(c->codec_id); // original

    /* open it */
    if (avcodec_open2(c, c->codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(301);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    samples = (int16_t *)av_malloc(audio_input_frame_size *
                                   av_get_bytes_per_sample(c->sample_fmt) *
                                   c->channels);
}

 

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (!got_packet)
        return;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(samples);
}

 

/**************************************************************/
/* video output */

static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;

/* Add a video output stream. */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    /* find the video encoder */
    codec = avcodec_find_encoder((AVCodecID)codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    st = avformat_new_stream(oc, codec);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    avcodec_get_context_defaults3(c, codec);

    c->codec_id = (AVCodecID)codec_id;

    /* Put sample parameters. */
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width    = 352;
    c->height   = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt       = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

 

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size        = avpicture_get_size(pix_fmt, width, height);
    picture_buf = (uint8_t *)av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;
    c->codec = avcodec_find_encoder(c->codec_id);

    /* open the codec */
    if (avcodec_open2(c, c->codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(201);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* Allocate output buffer. */
        /* XXX: API change will be done. */
        /* Buffers passed into lav* can be allocated any way you prefer,
         * as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        video_outbuf_size = 200000;
        video_outbuf      = (uint8_t *)av_malloc(video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

 

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

 

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr,
                            "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - the API will change slightly in the near
         * future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = (uint8_t *)picture;
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf,
                                        video_outbuf_size, picture);
        /* If size is zero, it means the image was buffered. */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;
            pkt.data         = video_outbuf;
            pkt.size         = out_size;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

 

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

 

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (0 == 1) { // Deactivated, hardcoded
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 101;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, (CodecID)fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 100;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    picture->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num /
                        audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            picture->pts++;
        }
    }

    /* flush video encoder */
    int stillgotframes = 1;
    while (stillgotframes) {
        AVPacket pkt;
        av_init_packet(&pkt);
        int got_packet = 0;
        int ret = -1;
        pkt.size = video_outbuf_size; // 1000000
        pkt.data = video_outbuf;      // constant location in memory
        ret = avcodec_encode_video2(video_st->codec, &pkt, NULL, &got_packet);

        if (got_packet) {
            av_interleaved_write_frame(oc, &pkt);
        }
        stillgotframes = got_packet;
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}

 

 

 

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Ashwin Chandra - SISA
Sent: Friday, February 08, 2013 8:26 PM
To: This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

I tried your flushing method too, but it did not make a difference.

The only way I can get this to work correctly is if I never flush frames
until I know I'm at the end of the video stream. The problem is that in my
case I don't know when the video stream ends or changes into a different one
(different width/height).

I still don't get why the MPEG2 encoder is fine with flushing after every
encode but H264 is not.

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Harald Jordan
Sent: Friday, February 08, 2013 10:42 AM
To: 'This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.'
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

Hm, I really don't believe your problem is the flushing part itself.

I wonder whether the pkt you are sending to the encoder is initialized - at
least I cannot see how. There is a huge number of issues in
avcodec_encode_video2 that occur when you send it a pkt that it did not
expect.

 

Here is how I flush at the end of transcoding:

       int stillgotframes = 1;
       while (stillgotframes) {
             AVPacket pkt;
             av_init_packet(&pkt);
             int got_packet = 0;
             int ret = -1;
             pkt.size = video_outbuf_size; // 1000000
             pkt.data = video_outbuf;      // constant location in memory
             ret = avcodec_encode_video2(video_st->codec, &pkt, NULL, &got_packet);
             av_interleaved_write_frame(oc, &pkt);
             stillgotframes = got_packet;
       }

 

 

Harry

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Ashwin Chandra - SISA
Sent: Friday, February 08, 2013 7:22 PM
To: This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

I tried all those presets, but they didn't help.

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Ashwin Chandra - SISA
Sent: Friday, February 08, 2013 8:39 AM
To: This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

But the decoding_encoding example works for me without any changes. The
problem only appears if you want to encode two frames and flush them all out.
With 25 frames there don't seem to be any issues.

 

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Harald Jordan
Sent: Friday, February 08, 2013 12:53 AM
To: 'This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.'
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

If you did not make any changes to the open_video and add_video functions of
the decoding_encoding example, it cannot work. For H264 you need to manually
define a set of H264 private settings before opening the codec.

http://stackoverflow.com/questions/3553003/encoding-h-264-with-libavcodec-x264
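
As a rough sketch only (the option values are just examples, not settings
from this thread), such private x264 settings can be handed to
avcodec_open2() through an AVDictionary (needs libavutil/dict.h); c and codec
are the codec context and encoder from the example code:

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "preset", "ultrafast", 0);   /* speed vs. compression trade-off */
    av_dict_set(&opts, "tune", "zerolatency", 0);   /* minimizes delayed/buffered frames */
    av_dict_set(&opts, "profile", "baseline", 0);
    if (avcodec_open2(c, codec, &opts) < 0) {
        fprintf(stderr, "could not open h264 codec\n");
        exit(1);
    }
    av_dict_free(&opts);                            /* options not consumed stay in opts */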

 

Harry

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Ashwin Chandra - SISA
Sent: Friday, February 08, 2013 00:29
To: This list is about using libavcodec, libavformat, libavutil, libavdevice and libavfilter.
Subject: Re: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

To be clearer, I modified the decoding_encoding.c example in ffmpeg to cause
the problem.

I basically changed video_encode_example() to encode 1 second's worth of
video and then flush the delayed frame. The code below is exactly the same
except that 25 frames was replaced with 1 frame.

Now if I take this code block and duplicate it, so that it basically runs
twice, the second avcodec_encode_video2() will hang.

 

 

    /* encode 1 second of video */
    for (i = 0; i < 1; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < c->height / 2; y++) {
            for (x = 0; x < c->width / 2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

 

 

 

From: libav-user-bounces at ffmpeg.org [mailto:libav-user-bounces at ffmpeg.org] On Behalf Of Ashwin Chandra - SISA
Sent: Thursday, February 07, 2013 3:08 PM
To: libav-user at ffmpeg.org
Subject: [Libav-user] Problem with using libavcodec with AV_CODEC_ID_H264

 

I have some code that takes a running stream of uncompressed video data and
encodes it using AV_CODEC_ID_H264.

The sequence I follow is:

1. Call avcodec_encode_video2 on the AVFrame which contains my uncompressed
frame.

2. Call avcodec_encode_video2 again, passing NULL in the AVFrame parameter.

3. Repeat 2 until a frame arrives from the encoder.

This seems to work fine if the codec is MPEG2, but with H264 it hangs inside
avcodec_encode_video2 at step 2 on the second frame. I don't have debug
symbols and can't figure out why. Does there need to be a minimum amount of
uncompressed data in the encoder before trying to flush out an encoded frame
for H264? If so, how do I know when it is safe to flush a frame?
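
For reference, the flush loop as used with avcodec_encode_video2() in the
stock examples looks roughly like this - a sketch only, where c and f are the
codec context and output file from the example above, and the packet is
zero-initialized each pass so the encoder allocates the output buffer itself:

    int got_output = 1;
    while (got_output) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the output buffer */
        pkt.size = 0;
        if (avcodec_encode_video2(c, &pkt, NULL, &got_output) < 0) {
            fprintf(stderr, "error flushing encoder\n");
            break;
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);   /* or av_interleaved_write_frame() when muxing */
            av_free_packet(&pkt);
        }
    }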

 

 

 

-------------- next part --------------
A non-text attachment was scrubbed...
Name: muxing.c_altered
Type: application/octet-stream
Size: 17756 bytes
Desc: not available
URL: <http://ffmpeg.org/pipermail/libav-user/attachments/20130209/0825dbe0/attachment.obj>

