FFmpeg
trim.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}
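
/*
 * A worked example of the conversion above (a sketch, assuming an audio link
 * with sample_rate = 44100 and the option start=1.5, so start_time holds
 * 1500000 in AV_TIME_BASE, i.e. microsecond, units):
 *
 *     AVRational tb = (AVRational){ 1, 44100 };
 *     int64_t start_pts = av_rescale_q(1500000, AV_TIME_BASE_Q, tb);
 *     // 1500000 us * 44100 / 1000000 == 66150 samples
 *
 * so a frame is kept once its timestamp, expressed in samples, reaches 66150.
 */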

#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
    { "start",     "Timestamp of the first frame that "           \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "starti",    "Timestamp of the first frame that "           \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "end",       "Timestamp of the first frame that "           \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",      "Timestamp of the first frame that "           \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be " \
        "passed",                  OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",   "Timestamp of the first frame that should be " \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration",  "Maximum duration of the output",  OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS }, \
    { "durationi", "Maximum duration of the output",  OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },

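/*
 * Typical use of these options from a filtergraph (a sketch; the exact
 * command depends on the surrounding graph):
 *
 *     ffmpeg -i in.mp4 -vf "trim=start=2:end=7,setpts=PTS-STARTPTS" out.mp4
 *     ffmpeg -i in.wav -af "atrim=start=2:duration=5,asetpts=PTS-STARTPTS" out.wav
 *
 * trim/atrim only drop frames/samples outside the selected range; they do not
 * shift the timestamps of what they pass through, hence the usual pairing
 * with setpts/asetpts to make the output start at timestamp zero.
 */
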
#if CONFIG_TRIM_FILTER
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}

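/*
 * Frame-number semantics, as a quick worked example (the numbers are
 * illustrative only): trim=start_frame=100:end_frame=200 keeps frames
 * 100..199.  start_frame names the first frame passed through, end_frame the
 * first frame dropped again, and frames are counted from 0 in s->nb_frames.
 */
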
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
        "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int64_t start_sample, end_sample;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}

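/*
 * Worked example of the slicing above (numbers are illustrative): a frame of
 * 1024 samples whose pts corresponds to sample 0 arrives while start_pts is
 * 500 (in 1/samplerate units) and no end condition is set.  Then
 * start_sample = 500 and end_sample = 1024, so a new 524-sample buffer is
 * allocated, samples 500..1023 are copied into it with av_samples_copy(),
 * and its pts is advanced by 500 samples rescaled to the link timebase.
 */
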
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
        "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,
    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER
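
/*
 * Sample-accurate trimming (a sketch, assuming 44.1 kHz input): keep exactly
 * the second second of audio:
 *
 *     ffmpeg -i in.wav -af "atrim=start_sample=44100:end_sample=88200" out.wav
 *
 * which passes samples 44100..88199, i.e. one second of audio.
 */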