<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
</head>
<body bgcolor="#FFFFFF" text="#000000">
Hi,<br>
I am implementing an RTSP relay using libavcodec (ffmpeg).
Basically, it connects to an IP camera, which is an RTSP server,
PULLs the stream and then PUSHes it to Wowza. I am finding it very
difficult to get any sample implementation for this on the internet.
So I started implementing this using bits and pieces from here and
there. I took code from code samples to a) read an RTSP stream and write
it to a file, and b) read from a file and serve it over RTSP.<br>
<br>
This is the current state of the code:<br>
<i><font color="#009900"><br>
/*<br>
* Copyright (c) 2010 Nicolas George<br>
* Copyright (c) 2011 Stefano Sabatini<br>
* Copyright (c) 2014 Andrey Utkin<br>
*<br>
* Permission is hereby granted, free of charge, to any
person obtaining a copy<br>
* of this software and associated documentation files (the
"Software"), to deal<br>
* in the Software without restriction, including without
limitation the rights<br>
* to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell<br>
* copies of the Software, and to permit persons to whom the
Software is<br>
* furnished to do so, subject to the following conditions:<br>
*<br>
* The above copyright notice and this permission notice
shall be included in<br>
* all copies or substantial portions of the Software.<br>
*<br>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR<br>
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY,<br>
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL<br>
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER<br>
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM,<br>
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN<br>
* THE SOFTWARE.<br>
*/<br>
<br>
/**<br>
* @file<br>
* API example for demuxing, decoding, filtering, encoding
and muxing<br>
* @example transcoding.c<br>
*/<br>
<br>
#include <libavcodec/avcodec.h><br>
#include <libavformat/avformat.h><br>
#include <libavfilter/avfiltergraph.h><br>
#include <libavfilter/buffersink.h><br>
#include <libavfilter/buffersrc.h><br>
#include <libavutil/opt.h><br>
#include <libavutil/pixdesc.h><br>
<br>
static AVFormatContext *ifmt_ctx,*temp;<br>
static AVFormatContext *ofmt_ctx;<br>
typedef struct FilteringContext {<br>
AVFilterContext *buffersink_ctx;<br>
AVFilterContext *buffersrc_ctx;<br>
AVFilterGraph *filter_graph;<br>
} FilteringContext;<br>
static FilteringContext *filter_ctx;<br>
<br>
static int open_input_file(const char *filename)<br>
{<br>
int ret;<br>
unsigned int i;<br>
<br>
ifmt_ctx = NULL;<br>
if ((ret = avformat_open_input(&ifmt_ctx, filename,
NULL, NULL)) < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot open input
file\n");<br>
return ret;<br>
}<br>
<br>
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL))
< 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot find stream
information\n");<br>
return ret;<br>
}<br>
<br>
for (i = 0; i < ifmt_ctx->nb_streams; i++) {<br>
AVStream *stream;<br>
AVCodecContext *codec_ctx;<br>
stream = ifmt_ctx->streams[i];<br>
codec_ctx = stream->codec;<br>
/* Reencode video & audio and remux subtitles
etc. */<br>
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO<br>
|| codec_ctx->codec_type ==
AVMEDIA_TYPE_AUDIO) {<br>
/* Open decoder */<br>
ret = avcodec_open2(codec_ctx,<br>
avcodec_find_decoder(codec_ctx->codec_id), NULL);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Failed to open
decoder for stream #%u\n", i);<br>
return ret;<br>
}<br>
}<br>
}<br>
<br>
av_dump_format(ifmt_ctx, 0, filename, 0);<br>
return 0;<br>
}<br>
<br>
static int open_output_file(const char *filename)<br>
{<br>
AVStream *out_stream;<br>
AVStream *in_stream;<br>
AVCodecContext *dec_ctx, *enc_ctx;<br>
AVCodec *encoder,*codec;<br>
int ret;<br>
unsigned int i;<br>
<br>
ofmt_ctx = NULL;<br>
avformat_open_input(&temp,
<a class="moz-txt-link-rfc2396E" href="rtsp://localhost:8554/live">"rtsp://localhost:8554/live"</a>, NULL, NULL);<br>
avformat_alloc_output_context2(&ofmt_ctx,
temp->oformat,"rtsp", filename);<br>
if (!ofmt_ctx) {<br>
av_log(NULL, AV_LOG_ERROR, "Could not create output
context\n");<br>
return AVERROR_UNKNOWN;<br>
}<br>
<br>
<br>
for (i = 0; i < ifmt_ctx->nb_streams; i++) {<br>
out_stream = avformat_new_stream(ofmt_ctx, NULL);<br>
if (!out_stream) {<br>
av_log(NULL, AV_LOG_ERROR, "Failed allocating
output stream\n");<br>
return AVERROR_UNKNOWN;<br>
}<br>
<br>
in_stream = ifmt_ctx->streams[i];<br>
dec_ctx = in_stream->codec;<br>
enc_ctx = out_stream->codec;<br>
<br>
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO<br>
|| dec_ctx->codec_type ==
AVMEDIA_TYPE_AUDIO) {<br>
/* in this example, we choose transcoding to
same codec */<br>
encoder =
avcodec_find_encoder(dec_ctx->codec_id);<br>
printf("codec : %d",dec_ctx->codec_id);<br>
if (!encoder) {<br>
av_log(NULL, AV_LOG_FATAL, "Necessary
encoder not found\n");<br>
return AVERROR_INVALIDDATA;<br>
}<br>
<br>
/* In this example, we transcode to same
properties (picture size,<br>
* sample rate etc.). These properties can be
changed for output<br>
* streams easily using filters */<br>
if (dec_ctx->codec_type ==
AVMEDIA_TYPE_VIDEO) {<br>
enc_ctx->height = dec_ctx->height;<br>
enc_ctx->width = dec_ctx->width;<br>
enc_ctx->sample_aspect_ratio =
dec_ctx->sample_aspect_ratio;<br>
/* take first format from list of supported
formats */<br>
enc_ctx->pix_fmt =
encoder->pix_fmts[0];<br>
//enc_ctx->codec_id = AV_CODEC_ID_H264;<br>
/* video time_base can be set to whatever is
handy and supported by encoder */<br>
enc_ctx->time_base =
dec_ctx->time_base;<br>
} else {<br>
enc_ctx->sample_rate =
dec_ctx->sample_rate;<br>
enc_ctx->channel_layout =
dec_ctx->channel_layout;<br>
enc_ctx->channels =
av_get_channel_layout_nb_channels(enc_ctx->channel_layout);<br>
/* take first format from list of supported
formats */<br>
enc_ctx->sample_fmt =
encoder->sample_fmts[0];<br>
enc_ctx->pix_fmt =AV_PIX_FMT_RGB24 ;<br>
enc_ctx->time_base = (AVRational){1,
enc_ctx->sample_rate};<br>
}<br>
<br>
/* Third parameter can be used to pass settings
to encoder */<br>
//codec =
avcodec_find_encoder(AV_CODEC_ID_H264);<br>
ret = avcodec_open2(enc_ctx, encoder, NULL);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot open
video encoder for stream #%u\n", i);<br>
return ret;<br>
}<br>
} else if (dec_ctx->codec_type ==
AVMEDIA_TYPE_UNKNOWN) {<br>
av_log(NULL, AV_LOG_FATAL, "Elementary stream
#%d is of unknown type, cannot proceed\n", i);<br>
return AVERROR_INVALIDDATA;<br>
} else {<br>
/* if this stream must be remuxed */<br>
ret =
avcodec_copy_context(ofmt_ctx->streams[i]->codec,<br>
ifmt_ctx->streams[i]->codec);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Copying stream
context failed\n");<br>
return ret;<br>
}<br>
}<br>
<br>
if (ofmt_ctx->oformat->flags &
AVFMT_GLOBALHEADER)<br>
enc_ctx->flags |=
AV_CODEC_FLAG_GLOBAL_HEADER;<br>
<br>
}<br>
av_dump_format(ofmt_ctx, 0, filename, 1);<br>
<br>
if (!(ofmt_ctx->oformat->flags &
AVFMT_NOFILE)) {<br>
ret = avio_open(&ofmt_ctx->pb, filename,
AVIO_FLAG_WRITE);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Could not open
output file '%s'", filename);<br>
return ret;<br>
}<br>
}<br>
<br>
/* init muxer, write output file header */<br>
ret = avformat_write_header(ofmt_ctx, NULL);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Error occurred when
opening output file\n");<br>
return ret;<br>
}<br>
<br>
return 0;<br>
}<br>
<br>
static int init_filter(FilteringContext* fctx,
AVCodecContext *dec_ctx,<br>
AVCodecContext *enc_ctx, const char *filter_spec)<br>
{<br>
char args[512];<br>
int ret = 0;<br>
AVFilter *buffersrc = NULL;<br>
AVFilter *buffersink = NULL;<br>
AVFilterContext *buffersrc_ctx = NULL;<br>
AVFilterContext *buffersink_ctx = NULL;<br>
AVFilterInOut *outputs = avfilter_inout_alloc();<br>
AVFilterInOut *inputs = avfilter_inout_alloc();<br>
AVFilterGraph *filter_graph = avfilter_graph_alloc();<br>
<br>
if (!outputs || !inputs || !filter_graph) {<br>
ret = AVERROR(ENOMEM);<br>
goto end;<br>
}<br>
<br>
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {<br>
buffersrc = avfilter_get_by_name("buffer");<br>
buffersink = avfilter_get_by_name("buffersink");<br>
if (!buffersrc || !buffersink) {<br>
av_log(NULL, AV_LOG_ERROR, "filtering source or
sink element not found\n");<br>
ret = AVERROR_UNKNOWN;<br>
goto end;<br>
}<br>
<br>
snprintf(args, sizeof(args),<br>
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",<br>
dec_ctx->width, dec_ctx->height,
dec_ctx->pix_fmt,<br>
dec_ctx->time_base.num,
dec_ctx->time_base.den,<br>
dec_ctx->sample_aspect_ratio.num,<br>
dec_ctx->sample_aspect_ratio.den);<br>
<br>
ret =
avfilter_graph_create_filter(&buffersrc_ctx, buffersrc,
"in",<br>
args, NULL, filter_graph);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer
source\n");<br>
goto end;<br>
}<br>
<br>
ret =
avfilter_graph_create_filter(&buffersink_ctx, buffersink,
"out",<br>
NULL, NULL, filter_graph);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer
sink\n");<br>
goto end;<br>
}<br>
<br>
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",<br>
(uint8_t*)&enc_ctx->pix_fmt,
sizeof(enc_ctx->pix_fmt),<br>
AV_OPT_SEARCH_CHILDREN);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot set output
pixel format\n");<br>
goto end;<br>
}<br>
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
{<br>
buffersrc = avfilter_get_by_name("abuffer");<br>
buffersink = avfilter_get_by_name("abuffersink");<br>
if (!buffersrc || !buffersink) {<br>
av_log(NULL, AV_LOG_ERROR, "filtering source or
sink element not found\n");<br>
ret = AVERROR_UNKNOWN;<br>
goto end;<br>
}<br>
<br>
if (!dec_ctx->channel_layout)<br>
dec_ctx->channel_layout =<br>
av_get_default_channel_layout(dec_ctx->channels);<br>
snprintf(args, sizeof(args),<br>
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,<br>
dec_ctx->time_base.num,
dec_ctx->time_base.den, dec_ctx->sample_rate,<br>
av_get_sample_fmt_name(dec_ctx->sample_fmt),<br>
dec_ctx->channel_layout);<br>
ret =
avfilter_graph_create_filter(&buffersrc_ctx, buffersrc,
"in",<br>
args, NULL, filter_graph);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot create audio
buffer source\n");<br>
goto end;<br>
}<br>
<br>
ret =
avfilter_graph_create_filter(&buffersink_ctx, buffersink,
"out",<br>
NULL, NULL, filter_graph);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot create audio
buffer sink\n");<br>
goto end;<br>
}<br>
<br>
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",<br>
(uint8_t*)&enc_ctx->sample_fmt,
sizeof(enc_ctx->sample_fmt),<br>
AV_OPT_SEARCH_CHILDREN);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot set output
sample format\n");<br>
goto end;<br>
}<br>
<br>
ret = av_opt_set_bin(buffersink_ctx,
"channel_layouts",<br>
(uint8_t*)&enc_ctx->channel_layout,<br>
sizeof(enc_ctx->channel_layout),
AV_OPT_SEARCH_CHILDREN);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot set output
channel layout\n");<br>
goto end;<br>
}<br>
<br>
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",<br>
(uint8_t*)&enc_ctx->sample_rate,
sizeof(enc_ctx->sample_rate),<br>
AV_OPT_SEARCH_CHILDREN);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Cannot set output
sample rate\n");<br>
goto end;<br>
}<br>
} else {<br>
ret = AVERROR_UNKNOWN;<br>
goto end;<br>
}<br>
<br>
/* Endpoints for the filter graph. */<br>
outputs->name = av_strdup("in");<br>
outputs->filter_ctx = buffersrc_ctx;<br>
outputs->pad_idx = 0;<br>
outputs->next = NULL;<br>
<br>
inputs->name = av_strdup("out");<br>
inputs->filter_ctx = buffersink_ctx;<br>
inputs->pad_idx = 0;<br>
inputs->next = NULL;<br>
<br>
if (!outputs->name || !inputs->name) {<br>
ret = AVERROR(ENOMEM);<br>
goto end;<br>
}<br>
<br>
if ((ret = avfilter_graph_parse_ptr(filter_graph,
filter_spec,<br>
&inputs, &outputs, NULL)) <
0)<br>
goto end;<br>
<br>
if ((ret = avfilter_graph_config(filter_graph, NULL))
< 0)<br>
goto end;<br>
<br>
/* Fill FilteringContext */<br>
fctx->buffersrc_ctx = buffersrc_ctx;<br>
fctx->buffersink_ctx = buffersink_ctx;<br>
fctx->filter_graph = filter_graph;<br>
<br>
end:<br>
avfilter_inout_free(&inputs);<br>
avfilter_inout_free(&outputs);<br>
<br>
return ret;<br>
}<br>
<br>
static int init_filters(void)<br>
{<br>
const char *filter_spec;<br>
unsigned int i;<br>
int ret;<br>
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams,
sizeof(*filter_ctx));<br>
if (!filter_ctx)<br>
return AVERROR(ENOMEM);<br>
<br>
for (i = 0; i < ifmt_ctx->nb_streams; i++) {<br>
filter_ctx[i].buffersrc_ctx = NULL;<br>
filter_ctx[i].buffersink_ctx = NULL;<br>
filter_ctx[i].filter_graph = NULL;<br>
if
(!(ifmt_ctx->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_AUDIO<br>
||
ifmt_ctx->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO))<br>
continue;<br>
<br>
<br>
if (ifmt_ctx->streams[i]->codec->codec_type
== AVMEDIA_TYPE_VIDEO)<br>
filter_spec = "null"; /* passthrough (dummy)
filter for video */<br>
else<br>
filter_spec = "anull"; /* passthrough (dummy)
filter for audio */<br>
ret = init_filter(&filter_ctx[i],
ifmt_ctx->streams[i]->codec,<br>
ofmt_ctx->streams[i]->codec,
filter_spec);<br>
if (ret)<br>
return ret;<br>
}<br>
return 0;<br>
}<br>
<br>
static int encode_write_frame(AVFrame *filt_frame, unsigned
int stream_index, int *got_frame) {<br>
int ret;<br>
int got_frame_local;<br>
AVPacket enc_pkt;<br>
int (*enc_func)(AVCodecContext *, AVPacket *, const
AVFrame *, int *) =<br>
(ifmt_ctx->streams[stream_index]->codec->codec_type ==<br>
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 :
avcodec_encode_audio2;<br>
<br>
if (!got_frame)<br>
got_frame = &got_frame_local;<br>
<br>
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");<br>
/* encode filtered frame */<br>
enc_pkt.data = NULL;<br>
enc_pkt.size = 0;<br>
av_init_packet(&enc_pkt);<br>
ret =
enc_func(ofmt_ctx->streams[stream_index]->codec,
&enc_pkt,<br>
filt_frame, got_frame);<br>
av_frame_free(&filt_frame);<br>
if (ret < 0)<br>
return ret;<br>
if (!(*got_frame))<br>
return 0;<br>
<br>
/* prepare packet for muxing */<br>
enc_pkt.stream_index = stream_index;<br>
av_packet_rescale_ts(&enc_pkt,<br>
ofmt_ctx->streams[stream_index]->codec->time_base,<br>
ofmt_ctx->streams[stream_index]->time_base);<br>
<br>
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");<br>
/* mux encoded frame */<br>
ret = av_interleaved_write_frame(ofmt_ctx,
&enc_pkt);<br>
return ret;<br>
}<br>
<br>
static int filter_encode_write_frame(AVFrame *frame,
unsigned int stream_index)<br>
{<br>
int ret;<br>
AVFrame *filt_frame;<br>
<br>
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to
filters\n");<br>
/* push the decoded frame into the filtergraph */<br>
ret =
av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,<br>
frame, 0);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Error while feeding the
filtergraph\n");<br>
return ret;<br>
}<br>
<br>
/* pull filtered frames from the filtergraph */<br>
while (1) {<br>
filt_frame = av_frame_alloc();<br>
if (!filt_frame) {<br>
ret = AVERROR(ENOMEM);<br>
break;<br>
}<br>
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame
from filters\n");<br>
ret =
av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,<br>
filt_frame);<br>
if (ret < 0) {<br>
/* if no more frames for output - returns
AVERROR(EAGAIN)<br>
* if flushed and no more frames for output -
returns AVERROR_EOF<br>
* rewrite retcode to 0 to show it as normal
procedure completion<br>
*/<br>
if (ret == AVERROR(EAGAIN) || ret ==
AVERROR_EOF)<br>
ret = 0;<br>
av_frame_free(&filt_frame);<br>
break;<br>
}<br>
<br>
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;<br>
ret = encode_write_frame(filt_frame, stream_index,
NULL);<br>
if (ret < 0)<br>
break;<br>
}<br>
<br>
return ret;<br>
}<br>
<br>
static int flush_encoder(unsigned int stream_index)<br>
{<br>
int ret;<br>
int got_frame;<br>
<br>
if
(!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities
&<br>
AV_CODEC_CAP_DELAY))<br>
return 0;<br>
<br>
while (1) {<br>
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u
encoder\n", stream_index);<br>
ret = encode_write_frame(NULL, stream_index,
&got_frame);<br>
if (ret < 0)<br>
break;<br>
if (!got_frame)<br>
return 0;<br>
}<br>
return ret;<br>
}<br>
<br>
int main(int argc, char **argv)<br>
{<br>
int ret;<br>
AVPacket packet = { .data = NULL, .size = 0 };<br>
AVFrame *frame = NULL;<br>
enum AVMediaType type;<br>
unsigned int stream_index;<br>
unsigned int i;<br>
int got_frame;<br>
int (*dec_func)(AVCodecContext *, AVFrame *, int *,
const AVPacket *);<br>
<br>
if (argc != 3) {<br>
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input
file> <output file>\n", argv[0]);<br>
return 1;<br>
}<br>
<br>
av_register_all();<br>
avfilter_register_all();<br>
avformat_network_init();<br>
<br>
if ((ret = open_input_file(argv[1])) < 0)<br>
goto end;<br>
if ((ret = open_output_file(argv[2])) < 0)<br>
goto end;<br>
if ((ret = init_filters()) < 0)<br>
goto end;<br>
<br>
/* read all packets */<br>
while (1) {<br>
if ((ret = av_read_frame(ifmt_ctx, &packet))
< 0)<br>
break;<br>
stream_index = packet.stream_index;<br>
type =
ifmt_ctx->streams[packet.stream_index]->codec->codec_type;<br>
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of
stream_index %u\n",<br>
stream_index);<br>
<br>
if (filter_ctx[stream_index].filter_graph) {<br>
av_log(NULL, AV_LOG_DEBUG, "Going to
reencode&filter the frame\n");<br>
frame = av_frame_alloc();<br>
if (!frame) {<br>
ret = AVERROR(ENOMEM);<br>
break;<br>
}<br>
av_packet_rescale_ts(&packet,<br>
ifmt_ctx->streams[stream_index]->time_base,<br>
ifmt_ctx->streams[stream_index]->codec->time_base);<br>
dec_func = (type == AVMEDIA_TYPE_VIDEO) ?
avcodec_decode_video2 :<br>
avcodec_decode_audio4;<br>
ret =
dec_func(ifmt_ctx->streams[stream_index]->codec, frame,<br>
&got_frame, &packet);<br>
if (ret < 0) {<br>
av_frame_free(&frame);<br>
av_log(NULL, AV_LOG_ERROR, "Decoding
failed\n");<br>
break;<br>
}<br>
<br>
if (got_frame) {<br>
frame->pts =
av_frame_get_best_effort_timestamp(frame);<br>
ret = filter_encode_write_frame(frame,
stream_index);<br>
av_frame_free(&frame);<br>
if (ret < 0)<br>
goto end;<br>
} else {<br>
av_frame_free(&frame);<br>
}<br>
} else {<br>
/* remux this frame without reencoding */<br>
av_packet_rescale_ts(&packet,<br>
ifmt_ctx->streams[stream_index]->time_base,<br>
ofmt_ctx->streams[stream_index]->time_base);<br>
<br>
ret = av_interleaved_write_frame(ofmt_ctx,
&packet);<br>
if (ret < 0)<br>
goto end;<br>
}<br>
av_packet_unref(&packet);<br>
}<br>
<br>
/* flush filters and encoders */<br>
for (i = 0; i < ifmt_ctx->nb_streams; i++) {<br>
/* flush filter */<br>
if (!filter_ctx[i].filter_graph)<br>
continue;<br>
ret = filter_encode_write_frame(NULL, i);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Flushing filter
failed\n");<br>
goto end;<br>
}<br>
<br>
/* flush encoder */<br>
ret = flush_encoder(i);<br>
if (ret < 0) {<br>
av_log(NULL, AV_LOG_ERROR, "Flushing encoder
failed\n");<br>
goto end;<br>
}<br>
}<br>
<br>
av_write_trailer(ofmt_ctx);<br>
end:<br>
av_packet_unref(&packet);<br>
av_frame_free(&frame);<br>
for (i = 0; i < ifmt_ctx->nb_streams; i++) {<br>
avcodec_close(ifmt_ctx->streams[i]->codec);<br>
if (ofmt_ctx && ofmt_ctx->nb_streams >
i && ofmt_ctx->streams[i] &&
ofmt_ctx->streams[i]->codec)<br>
avcodec_close(ofmt_ctx->streams[i]->codec);<br>
if (filter_ctx &&
filter_ctx[i].filter_graph)<br>
avfilter_graph_free(&filter_ctx[i].filter_graph);<br>
}<br>
av_free(filter_ctx);<br>
avformat_close_input(&ifmt_ctx);<br>
if (ofmt_ctx && !(ofmt_ctx->oformat->flags
& AVFMT_NOFILE))<br>
avio_closep(&ofmt_ctx->pb);<br>
avformat_free_context(ofmt_ctx);<br>
<br>
if (ret < 0)<br>
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n",
av_err2str(ret));<br>
<br>
return ret ? 1 : 0;<br>
}</font></i><br>
<br>
When we run this, we get the following error:<br>
<br>
<i><font color="#009900"> Input #0, rtsp, from
'<a class="moz-txt-link-freetext" href="rtsp://localhost:8554/live">rtsp://localhost:8554/live</a>':<br>
Metadata:<br>
title : Unnamed<br>
comment : N/A<br>
Duration: N/A, start: 5294.642467, bitrate: N/A<br>
Stream #0:0: Video: h264 (Constrained Baseline),
yuv420p, 640x480 [SAR 1:1 DAR 4:3], 30 fps, 30 tbr, 90k tbn, 60
tbc<br>
[libx264 @ 0xe87c80] Specified pixel format -1 is invalid or
not supported<br>
Cannot open video encoder for stream #0<br>
codec : 28 and pix :12<br>
Error occurred: Invalid argument</font></i><br>
<br>
However, I can see that the correct pixel format macro
"AV_PIX_FMT_YUV420P" is being provided. <br>
<br>
Thanks
</body>
</html>