[FFmpeg-devel] RTP/H264 encoding using FFMpeg libs

Dominik Tomczak dominiktomczak
Fri Jun 11 16:51:30 CEST 2010


Hello

I try to use libavXXX libraries to do H264 encoding and RTP streaming. I'm
able to do it using FFMpeg itself but now I try to get it from src code to
write my own encoder/streamer. Below is what I do. It does not work, I can't
see VLC plays this stream. I think img convert from RGB->YUV works, H264
encoding too. The problem is with RTP muxing. I do not know how to write
code to use RTP muxer properly. Any hints are warmly welcome.

Thanks
Dominik Tomczak

    const int width = avCodecCtx->getVideoCodecContext()->width;
    const int height = avCodecCtx->getVideoCodecContext()->height;
// Allocate video frame
AVObject<AVFrame> srcFrame(avcodec_alloc_frame, av_free);
    AVObject<AVFrame> dstFrame(avcodec_alloc_frame, av_free);

const PixelFormat srcPixelFormat = PIX_FMT_RGB32;
const int srcSize = avpicture_get_size( srcPixelFormat, width, height );

    const PixelFormat dstPixelFormat = PIX_FMT_YUV420P;
const int dstSize = avpicture_get_size( dstPixelFormat, width, height );
    uint8_t* dst_buffer = (uint8_t *)av_malloc(dstSize);

    const int video_outbuf_size = 200000;
    uint8_t* video_outbuf = (uint8_t *)av_malloc(video_outbuf_size);

    int result = avpicture_fill( (AVPicture*)(AVFrame*)srcFrame, (uint8_t
*)inputBuffer, srcPixelFormat, width, height );
    result = avpicture_fill( (AVPicture*)(AVFrame*)dstFrame, (uint8_t
*)dst_buffer, dstPixelFormat, width, height );


    SwsContext* const swsCtx = sws_getContext( width, height,
srcPixelFormat,
        width, height, dstPixelFormat, SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX
| SWS_CPU_CAPS_MMX2 | SWS_CPU_CAPS_3DNOW | SWS_CPU_CAPS_ALTIVEC,
        NULL, NULL, NULL );

    result = sws_scale( swsCtx, srcFrame.get()->data,
srcFrame.get()->linesize, 0, height, dstFrame.get()->data,
dstFrame.get()->linesize );

    sws_freeContext( swsCtx );

const int encodedBytes = avcodec_encode_video(
avCodecCtx->getVideoCodecContext(), video_outbuf, video_outbuf_size,
(AVFrame*)dstFrame );
if ( 0 > encodedBytes  )
{
return false;
}
  if ( encodedBytes > 0 )
  {
    AVPacket packet;
    av_init_packet( &packet );
    if(avCodecCtx->getVideoCodecContext()->coded_frame->key_frame)
      packet.flags |= PKT_FLAG_KEY;
    packet.stream_index= avCodecCtx->getVideoStream()->index;
    packet.data= video_outbuf;
    packet.size= encodedBytes;

  static bool once = true;
  if (once)
  {


    result = av_write_header( avCodecCtx->getFormatContext());
    once = false;
  }
  result = av_write_frame( avCodecCtx->getFormatContext(), &packet );
    if ( result != 0 )
    {
        return false;
    }

    av_free(dst_buffer);
    av_free(video_outbuf);
  }



-- 
Dominik Tomczak
Software Engineer
www.dominiktomczak.com



More information about the ffmpeg-devel mailing list