I have tried muxing.c with some modifications and I get a movie file, but it starts at a timestamp other than 0 (frame 3 or 5 at 24 fps to be precise, depending on the codec) and it lacks the last 3-5 frames fed into it.
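For comparison, this is how I understand the delayed frames are supposed to be drained once the last input picture has been fed (just a rough sketch of the pattern as I read the avcodec_encode_video2() docs, assuming a NULL frame flushes the encoder and got_packet stays non-zero while packets remain; write_delayed_frames() is only a hypothetical helper name, not something in my code below):

/* Sketch only: drain whatever the encoder still has buffered after the
 * last real picture.  Assumes avcodec_encode_video2() semantics where a
 * NULL frame flushes the encoder and got_packet stays non-zero while
 * delayed packets remain. */
static int write_delayed_frames(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c = st->codec;
    int got_packet = 1;

    while (got_packet) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the packet payload */
        pkt.size = 0;

        int ret = avcodec_encode_video2(c, &pkt, NULL, &got_packet);
        if (ret < 0)
            return ret;

        if (got_packet) {
            /* rescale packet timestamps from codec to stream time base */
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, c->time_base, st->time_base);
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts = av_rescale_q(pkt.dts, c->time_base, st->time_base);
            pkt.stream_index = st->index;

            /* each delayed packet still has to be written to the muxer */
            ret = av_interleaved_write_frame(oc, &pkt);
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}

My actual code follows.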
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count = 0, video_outbuf_size;

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = (uint8_t*)av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static bool open_video(AVFormatContext *oc, AVStream *st,
                       const CMedia* img )
{
    AVCodecContext* c = st->codec;

    /* find the video encoder */
    AVCodec* codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        LOG_ERROR( _("Video codec not found") );
        return false;
    }

    /* open the codec */
    if (avcodec_open2(c, codec, NULL) < 0) {
        LOG_ERROR( _("Could not open video codec") );
        return false;
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* Allocate output buffer. */
        /* XXX: API change will be done. */
        /* Buffers passed into lav* can be allocated any way you prefer,
         * as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        video_outbuf_size = 2048*2048*3;
        video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    picture = alloc_picture(c->pix_fmt, img->width(), img->height());
    if (!picture) {
        LOG_ERROR( _("Could not allocate picture") );
        return false;
    }

    return true;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    av_free(video_outbuf);
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, const CMedia* img )
{
    CMedia* m = (CMedia*) img;
    image_type_ptr hires = img->hires();

    unsigned w = img->width();
    unsigned h = img->height();

    for ( unsigned y = 0; y < h; ++y )
    {
        for ( unsigned x = 0; x < w; ++x )
        {
            ImagePixel p = hires->pixel( x, y );

            if ( img->gamma() != 1.0f )
            {
                float gamma = 1.0f/img->gamma();
                p.r = powf( p.r, gamma );
                p.g = powf( p.g, gamma );
                p.b = powf( p.b, gamma );

                if (p.r < 0.0f) p.r = 0.0f;
                if (p.g < 0.0f) p.g = 0.0f;
                if (p.b < 0.0f) p.b = 0.0f;
                if (p.r > 1.0f) p.r = 1.0f;
                if (p.g > 1.0f) p.g = 1.0f;
                if (p.b > 1.0f) p.b = 1.0f;
            }

            ImagePixel yuv = color::rgb::to_ITU601( p );

            pict->data[0][y * pict->linesize[0] + x ] = yuv.r;

            unsigned x2 = x / 2;
            unsigned y2 = y / 2;

            pict->data[1][y2 * pict->linesize[1] + x2 ] = yuv.g;
            pict->data[2][y2 * pict->linesize[2] + x2 ] = yuv.b;
        }
    }
}

static bool write_video_frame(AVFormatContext* oc, AVStream* st,
                              const CMedia* img )
{
    int out_size, ret;
    AVCodecContext *c = NULL;

    c = st->codec;

    if (frame_count >= img->last_frame() - img->first_frame() + 1) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        fill_yuv_image( picture, img );
    }

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = video_outbuf;
    pkt.size = video_outbuf_size;

    int got_pic = 0;

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, picture, &got_pic);
    if (!ret && got_pic && c->coded_frame) {
        c->coded_frame->pts = pkt.pts;
        c->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
    }

    /* free any side data since we cannot return it */
    if (pkt.side_data_elems > 0) {
        int i;
        for (i = 0; i < pkt.side_data_elems; i++)
            av_free(pkt.side_data[i].data);
        av_freep(&pkt.side_data);
        pkt.side_data_elems = 0;
    }

    /* If size is zero, it means the image was buffered. */
    ret = ret ? ret : pkt.size;

    if (c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts = av_rescale_q(c->coded_frame->pts,
                               c->time_base, st->time_base);
    if (c->coded_frame->key_frame)
        pkt.flags |= AV_PKT_FLAG_KEY;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);

    if (ret != 0) {
        LOG_ERROR( _("Error while writing video frame") );
        return false;
    }

    if ( frame_count >= img->last_frame() - img->first_frame() + 1 )
    {
        cerr << "got pic" << endl;
        for (got_pic = 1; got_pic; ++frame_count) {

            ret = avcodec_encode_video2(c, &pkt, NULL, &got_pic);
            if (ret < 0) {
                LOG_ERROR( "error encoding video frame");
                break;
            }

            if (got_pic) {
                av_free_packet(&pkt);
            }
        }
    }

    frame_count++;
    return true;
}

static AVStream *add_video_stream(AVFormatContext *oc,
                                  AVCodec** codec,
                                  enum CodecID codec_id,
                                  const CMedia* img )
{
    /* find the video encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        LOG_ERROR( _( "Video codec not found") );
        return NULL;
    }

    AVStream* st = avformat_new_stream(oc, NULL);
    if (!st) {
        LOG_ERROR( _("Could not alloc stream") );
        return NULL;
    }

    AVCodecContext* c = st->codec;

    avcodec_get_context_defaults3(c, *codec);

    c->codec_id = codec_id;

    /* resolution must be a multiple of two */
    c->width  = (img->width()  / 2) * 2;
    c->height = (img->height() / 2) * 2;

    /* put sample parameters */
    c->bit_rate = c->width * c->height * 3;
    c->bit_rate_tolerance = 5000000;
    c->global_quality = 1;
    c->compression_level = FF_COMPRESSION_DEFAULT;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = img->fps();
    c->time_base.num = 1;
    c->ticks_per_frame = 2;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    c->max_b_frames = 1;
    c->me_method = 5;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    // c->b_quant_factor = 1;
    // c->b_quant_offset = 0.0f;
    // c->mpeg_quant = 0;
    // c->i_quant_factor = 1.0f;
    // c->i_quant_offset = 0.0f;
    // c->p_masking = 0.0f;
    // c->dark_masking = 0.0f;
    // c->me_cmp = 7;
    // c->me_sub_cmp = 7;
    // c->ildct_cmp = FF_CMP_SSE;
    // c->last_predictor_count = 2;
    // c->pre_me = 7;
    // c->me_pre_cmp = 7;
    // c->pre_dia_size = 8;
    // c->me_subpel_quality = 2;
    // c->me_range = 0;
    // c->intra_quant_bias = FF_DEFAULT_QUANT_BIAS;
    // c->inter_quant_bias = FF_DEFAULT_QUANT_BIAS;
    // c->mb_decision = FF_MB_DECISION_RD;
    // c->me_threshold = 8;
    // c->mb_threshold = 8;
    // c->intra_dc_precision = 1;
    // c->keyint_min = 4;

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static AVFormatContext *oc = NULL;
static AVOutputFormat* fmt = NULL;
static AVStream* audio_st = NULL, *video_st = NULL;

bool aviImage::open_movie( const char* filename, const CMedia* img )
{
    int ret = 0;

    int i;
    frame_count = 0;

    av_register_all();

    if ( oc == NULL )
    {
        avformat_alloc_output_context2(&oc, NULL, NULL, filename);
        if (!oc) {
            LOG_INFO( _("Could not deduce output format from file extension: using MPEG.") );
            avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
        }

        fmt = oc->oformat;

        video_st = NULL;
        audio_st = NULL;
        if (fmt->video_codec != CODEC_ID_NONE) {
            video_st = add_video_stream(oc, &video_codec, fmt->video_codec, img);
        }

        if (img->has_audio() && fmt->audio_codec != CODEC_ID_NONE) {
            audio_st = add_audio_stream(oc, &audio_cdc, fmt->audio_codec);
        }

        /* Now that all the parameters are set, we can open the audio and
         * video codecs and allocate the necessary encode buffers. */
        if (video_st)
            if ( ! open_video(oc, video_st, img) )
                return false;

        if (audio_st)
            if ( ! open_audio_static(oc, audio_cdc, audio_st) )
            {
                audio_st = NULL;
                if ( !video_st ) return false;
            }

        if (!(fmt->flags & AVFMT_NOFILE)) {
            if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
                LOG_ERROR( _("Could not open '") << filename << "'" );
                return false;
            }
        }

        picture->pts = 0;

        /* Write the stream header, if any. */
        avformat_write_header(oc, NULL);
    }

    return true;
}

bool aviImage::save_movie_frame( const CMedia* img )
{
    double audio_pts, video_pts;

    if (audio_st)
        audio_pts = ((double)audio_st->pts.val * audio_st->time_base.num /
                     audio_st->time_base.den);
    else
        audio_pts = 0.0;

    if (video_st)
        video_pts = ((double)video_st->pts.val * video_st->time_base.num /
                     video_st->time_base.den);

    /* write interleaved audio and video frames */
    if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
        write_audio_frame(oc, audio_st, img);
    } else {
        write_video_frame(oc, video_st, img);
        picture->pts++;
    }
}

bool aviImage::close_movie()
{
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio_static(oc, audio_st);

    /* Free the streams. */
    for (int i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);
    oc = NULL;
    return true;
}

void save_sequence_file( CMedia* img, const mrv::ViewerUI* uiMain,
                         const char* startdir)
{
    if (!img) return;

    const char* file = flu_save_chooser("Save Sequence",
                                        kIMAGE_PATTERN.c_str(), startdir);
    if ( !file ) return;

    std::string tmp = file;
    std::transform( tmp.begin(), tmp.end(), tmp.begin(),
                    (int(*)(int)) tolower);
    std::string ext = tmp.c_str() + tmp.size() - 4;

    bool movie = false;
    if ( ext == ".avi" || ext == ".mov" || ext == ".mp4" || ext == ".wmv" )
    {
        movie = true;
    }

    std::string root, fileseq = file;
    bool ok = mrv::fileroot( root, fileseq );
    if ( !ok && !movie ) return;

    if ( movie )
    {
        root = root.substr( 0, root.size() - 4 );
    }

    fltk::ProgressBar* progress = NULL;
    fltk::Window* main = (fltk::Window*)uiMain->uiMain;
    fltk::Window* w = new fltk::Window( main->x(), main->y() + main->h()/2,
                                        main->w(), 80 );
    w->child_of(main);
    w->begin();
    mrv::Timeline* timeline = uiMain->uiTimeline;
    int64_t first = timeline->minimum();
    int64_t last  = timeline->maximum();
    progress = new fltk::ProgressBar( 0, 20, w->w(), w->h()-20 );
    progress->range( 0, last - first + 1 );
    progress->align( fltk::ALIGN_TOP );
    char title[1024];
    sprintf( title, "Saving Sequence %" PRId64 " - %" PRId64,
             first, last );
    progress->label( title );
    progress->showtext(true);
    w->end();
    w->show();

    fltk::check();

    int64_t dts = first;
    int64_t frame = first;
    int64_t failed_frame = frame-1;

    const char* fileroot = root.c_str();

    mrv::media old;
    bool open_movie = false;
    int movie_count = 1;

    bool edl = uiMain->uiTimeline->edl();

    for ( ; frame <= last; ++frame )
    {
        int step = 1;

        uiMain->uiReelWindow->uiBrowser->seek( frame );
        mrv::media fg = uiMain->uiView->foreground();
        if (!fg) break;

        CMedia* img = fg->image();

        if ( old != fg )
        {
            old = fg;
            if ( open_movie )
            {
                aviImage::close_movie();
                open_movie = false;
            }
            if ( movie )
            {
                char buf[256];
                if ( edl )
                {
                    sprintf( buf, "%s%d%s", root.c_str(), movie_count,
                             ext.c_str() );
                }
                else
                {
                    sprintf( buf, "%s%s", root.c_str(), ext.c_str() );
                }

                if ( fs::exists( buf ) )
                {
                    int ok = fltk::ask( "Do you want to replace '%s'",
                                        buf );
                    if (!ok)
                    {
                        break;
                    }
                }

                if ( aviImage::open_movie( buf, img ) )
                {
                    open_movie = true;
                    ++movie_count;
                }
            }
        }

        {
            if (movie)
            {
                aviImage::save_movie_frame( img );
            }
            else
            {
                char buf[1024];
                sprintf( buf, fileroot, frame );
                img->save( buf );
            }
        }

        progress->step(1);
        fltk::check();

        if ( !w->visible() ) {
            break;
        }
    }

    if ( open_movie )
    {
        aviImage::close_movie();
        open_movie = false;
    }

    if ( w )
    {
        w->hide();
        w->destroy();
    }
}


--
Gonzalo Garramuño
ggarra13@gmail.com