f_interleave.c
/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio and video interleaver
 */

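/*
 * Typical usage, for illustration only (the command lines below are not part
 * of this file and assume the standard ffmpeg CLI and filtergraph syntax):
 *
 *   ffmpeg -i a.mkv -i b.mkv -filter_complex "[0:v][1:v] interleave" out.mkv
 *   ffmpeg -i a.wav -i b.wav -filter_complex "[0:a][1:a] ainterleave=nb_inputs=2" out.wav
 *
 * Frames from all inputs are buffered and released in increasing timestamp
 * order, so every input must feed frames with valid, comparable pts values.
 */
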
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

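/*
 * Private context shared by the interleave and ainterleave filters:
 * one FFBufQueue per input pad, holding frames until the globally
 * oldest one can be sent downstream.
 */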
typedef struct {
    const AVClass *class;
    int nb_inputs;
    struct FFBufQueue *queues;
} InterleaveContext;

#define OFFSET(x) offsetof(InterleaveContext, x)

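/*
 * Both filters expose a single option, the number of inputs; this macro
 * instantiates the per-filter AVOption table so each variant can pass its
 * own AV_OPT_FLAG_* flags.
 */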
#define DEFINE_OPTIONS(filt_name, flags_)                           \
static const AVOption filt_name##_options[] = {                     \
    { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { "n",         "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { NULL }                                                        \
}

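/**
 * Send the oldest queued frame, if any, to the output.
 *
 * Returns 0 without producing output while some open input still has an
 * empty queue (the globally oldest frame is not yet known), AVERROR_EOF once
 * all inputs are closed and drained, or the return value of
 * ff_filter_frame() otherwise.
 */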
inline static int push_frame(AVFilterContext *ctx)
{
    InterleaveContext *s = ctx->priv;
    AVFrame *frame;
    int i, queue_idx = -1;
    int64_t pts_min = INT64_MAX;

    /* look for oldest frame */
    for (i = 0; i < ctx->nb_inputs; i++) {
        struct FFBufQueue *q = &s->queues[i];

        if (!q->available && !ctx->inputs[i]->closed)
            return 0;
        if (q->available) {
            frame = ff_bufqueue_peek(q, 0);
            if (frame->pts < pts_min) {
                pts_min = frame->pts;
                queue_idx = i;
            }
        }
    }

    /* all inputs are closed */
    if (queue_idx < 0)
        return AVERROR_EOF;

    frame = ff_bufqueue_get(&s->queues[queue_idx]);
    av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
    return ff_filter_frame(ctx->outputs[0], frame);
}

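/**
 * Per-input callback: rescale the frame pts to AV_TIME_BASE_Q so frames from
 * different inputs are comparable, queue the frame on its input queue, then
 * try to push the globally oldest queued frame downstream. Frames without a
 * pts are rejected, since they cannot be interleaved.
 */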
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    InterleaveContext *s = ctx->priv;
    unsigned in_no = FF_INLINK_IDX(inlink);

    if (frame->pts == AV_NOPTS_VALUE) {
        av_log(ctx, AV_LOG_WARNING,
               "NOPTS value for input frame cannot be accepted, frame discarded\n");
        av_frame_free(&frame);
        return AVERROR_INVALIDDATA;
    }

    /* queue frame */
    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
           frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
    ff_bufqueue_add(ctx, &s->queues[in_no], frame);

    return push_frame(ctx);
}

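/**
 * Allocate one buffer queue per input and create the dynamic input pads.
 * The input media type is copied from the filter's single output pad, so the
 * same init() serves both the video (interleave) and audio (ainterleave)
 * variants.
 */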
static av_cold int init(AVFilterContext *ctx)
{
    InterleaveContext *s = ctx->priv;
    const AVFilterPad *outpad = &ctx->filter->outputs[0];
    int i;

    s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
    if (!s->queues)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad inpad = { 0 };

        inpad.name = av_asprintf("input%d", i);
        if (!inpad.name)
            return AVERROR(ENOMEM);
        inpad.type         = outpad->type;
        inpad.filter_frame = filter_frame;

        switch (outpad->type) {
        case AVMEDIA_TYPE_VIDEO:
            inpad.get_video_buffer = ff_null_get_video_buffer; break;
        case AVMEDIA_TYPE_AUDIO:
            inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
        default:
            av_assert0(0);
        }
        ff_insert_inpad(ctx, i, &inpad);
    }

    return 0;
}

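/**
 * Discard any frames still queued on the inputs and free the dynamically
 * allocated input pad names.
 */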
static av_cold void uninit(AVFilterContext *ctx)
{
    InterleaveContext *s = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        ff_bufqueue_discard_all(&s->queues[i]);
        av_freep(&s->queues[i]);
        av_freep(&ctx->input_pads[i].name);
    }
}

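/**
 * Validate and set up output link parameters. For video output the time base
 * is forced to AV_TIME_BASE_Q (matching the pts rescaling in filter_frame()),
 * all inputs must agree on frame size and sample aspect ratio, and the frame
 * rate is left unspecified since interleaving produces irregular intervals.
 */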
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = ctx->inputs[0];
    int i;

    if (outlink->type == AVMEDIA_TYPE_VIDEO) {
        outlink->time_base           = AV_TIME_BASE_Q;
        outlink->w                   = inlink0->w;
        outlink->h                   = inlink0->h;
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
        outlink->format              = inlink0->format;
        outlink->frame_rate = (AVRational) {1, 0};
        for (i = 1; i < ctx->nb_inputs; i++) {
            AVFilterLink *inlink = ctx->inputs[i];

            if (outlink->w                       != inlink->w                       ||
                outlink->h                       != inlink->h                       ||
                outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
                outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
                av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
                       "(size %dx%d, SAR %d:%d) do not match the corresponding "
                       "output link parameters (%dx%d, SAR %d:%d)\n",
                       ctx->input_pads[i].name, inlink->w, inlink->h,
                       inlink->sample_aspect_ratio.num,
                       inlink->sample_aspect_ratio.den,
                       outlink->w, outlink->h,
                       outlink->sample_aspect_ratio.num,
                       outlink->sample_aspect_ratio.den);
                return AVERROR(EINVAL);
            }
        }
    }

    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}

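/**
 * Output request callback: walk the inputs and request a frame from the first
 * open input whose queue is empty, returning immediately unless that request
 * reports EOF; once every input either has a queued frame or has reached EOF,
 * push the oldest queued frame downstream.
 */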
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    InterleaveContext *s = ctx->priv;
    int i, ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!s->queues[i].available && !ctx->inputs[i]->closed) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret != AVERROR_EOF)
                return ret;
        }
    }

    return push_frame(ctx);
}

#if CONFIG_INTERLEAVE_FILTER

DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);

static const AVFilterPad interleave_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_interleave = {
    .name        = "interleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .outputs     = interleave_outputs,
    .priv_class  = &interleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif

#if CONFIG_AINTERLEAVE_FILTER

DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(ainterleave);

static const AVFilterPad ainterleave_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_ainterleave = {
    .name        = "ainterleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .outputs     = ainterleave_outputs,
    .priv_class  = &ainterleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif