FFmpeg
trim.c
Go to the documentation of this file.
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
18 
#include <stdint.h>

#include "config.h"

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
34 typedef struct TrimContext {
35  const AVClass *class;
36 
37  /*
38  * AVOptions
39  */
40  int64_t duration;
41  int64_t start_time, end_time;
43  /*
44  * in the link timebase for video,
45  * in 1/samplerate for audio
46  */
47  int64_t start_pts, end_pts;
49 
50  /*
51  * number of video frames that arrived on this filter so far
52  */
53  int64_t nb_frames;
54  /*
55  * number of audio samples that arrived on this filter so far
56  */
57  int64_t nb_samples;
58  /*
59  * timestamp of the first frame in the output, in the timebase units
60  */
61  int64_t first_pts;
62  /*
63  * duration in the timebase units
64  */
65  int64_t duration_tb;
66 
67  int64_t next_pts;
68 
69  int eof;
70 } TrimContext;
71 
73 {
74  TrimContext *s = ctx->priv;
75 
76  s->first_pts = AV_NOPTS_VALUE;
77 
78  return 0;
79 }
80 
82 {
83  AVFilterContext *ctx = inlink->dst;
84  TrimContext *s = ctx->priv;
85  AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
86  inlink->time_base : (AVRational){ 1, inlink->sample_rate };
87 
88  if (s->start_time != INT64_MAX) {
89  int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
90  if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
91  s->start_pts = start_pts;
92  }
93  if (s->end_time != INT64_MAX) {
94  int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
95  if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
96  s->end_pts = end_pts;
97  }
98  if (s->duration)
99  s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
100 
101  return 0;
102 }
103 
#define OFFSET(x) offsetof(TrimContext, x)
/* Options shared by trim and atrim; the *i variants are aliases kept for
 * compatibility. FLAGS must be #defined by the including filter before use. */
#define COMMON_OPTS \
    { "start", "Timestamp of the first frame that " \
        "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "starti", "Timestamp of the first frame that " \
        "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end", "Timestamp of the first frame that " \
        "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi", "Timestamp of the first frame that " \
        "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be " \
        " passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts", "Timestamp of the first frame that should be " \
        "dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS }, \
    { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
120 
121 
122 #if CONFIG_TRIM_FILTER
123 static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
124 {
125  AVFilterContext *ctx = inlink->dst;
126  TrimContext *s = ctx->priv;
127  int drop;
128 
129  /* drop everything if EOF has already been returned */
130  if (s->eof) {
132  return 0;
133  }
134 
135  if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
136  drop = 1;
137  if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
138  drop = 0;
139  if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
140  frame->pts >= s->start_pts)
141  drop = 0;
142  if (drop)
143  goto drop;
144  }
145 
146  if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
147  s->first_pts = frame->pts;
148 
149  if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
150  drop = 1;
151 
152  if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
153  drop = 0;
154  if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
155  frame->pts < s->end_pts)
156  drop = 0;
157  if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
158  frame->pts - s->first_pts < s->duration_tb)
159  drop = 0;
160 
161  if (drop) {
162  s->eof = 1;
164  goto drop;
165  }
166  }
167 
168  s->nb_frames++;
169 
170  return ff_filter_frame(ctx->outputs[0], frame);
171 
172 drop:
173  s->nb_frames++;
175  return 0;
176 }
177 
178 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
179 static const AVOption trim_options[] = {
181  { "start_frame", "Number of the first frame that should be passed "
182  "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
183  { "end_frame", "Number of the first frame that should be dropped "
184  "again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
185  { NULL }
186 };
187 #undef FLAGS
188 
190 
191 static const AVFilterPad trim_inputs[] = {
192  {
193  .name = "default",
194  .type = AVMEDIA_TYPE_VIDEO,
195  .filter_frame = trim_filter_frame,
196  .config_props = config_input,
197  },
198 };
199 
200 static const AVFilterPad trim_outputs[] = {
201  {
202  .name = "default",
203  .type = AVMEDIA_TYPE_VIDEO,
204  },
205 };
206 
207 const AVFilter ff_vf_trim = {
208  .name = "trim",
209  .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
210  .init = init,
211  .priv_size = sizeof(TrimContext),
212  .priv_class = &trim_class,
213  FILTER_INPUTS(trim_inputs),
214  FILTER_OUTPUTS(trim_outputs),
215 };
216 #endif // CONFIG_TRIM_FILTER
217 
218 #if CONFIG_ATRIM_FILTER
219 static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
220 {
221  AVFilterContext *ctx = inlink->dst;
222  TrimContext *s = ctx->priv;
223  int64_t start_sample, end_sample;
224  int64_t pts;
225  int drop;
226 
227  /* drop everything if EOF has already been returned */
228  if (s->eof) {
230  return 0;
231  }
232 
233  if (frame->pts != AV_NOPTS_VALUE)
234  pts = av_rescale_q(frame->pts, inlink->time_base,
235  (AVRational){ 1, inlink->sample_rate });
236  else
237  pts = s->next_pts;
238  s->next_pts = pts + frame->nb_samples;
239 
240  /* check if at least a part of the frame is after the start time */
241  if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
242  start_sample = 0;
243  } else {
244  drop = 1;
245  start_sample = frame->nb_samples;
246 
247  if (s->start_sample >= 0 &&
248  s->nb_samples + frame->nb_samples > s->start_sample) {
249  drop = 0;
250  start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
251  }
252 
253  if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
254  pts + frame->nb_samples > s->start_pts) {
255  drop = 0;
256  start_sample = FFMIN(start_sample, s->start_pts - pts);
257  }
258 
259  if (drop)
260  goto drop;
261  }
262 
263  if (s->first_pts == AV_NOPTS_VALUE)
264  s->first_pts = pts + start_sample;
265 
266  /* check if at least a part of the frame is before the end time */
267  if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
268  end_sample = frame->nb_samples;
269  } else {
270  drop = 1;
271  end_sample = 0;
272 
273  if (s->end_sample != INT64_MAX &&
274  s->nb_samples < s->end_sample) {
275  drop = 0;
276  end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
277  }
278 
279  if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
280  pts < s->end_pts) {
281  drop = 0;
282  end_sample = FFMAX(end_sample, s->end_pts - pts);
283  }
284 
285  if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
286  drop = 0;
287  end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
288  }
289 
290  if (drop) {
291  s->eof = 1;
293  goto drop;
294  }
295  }
296 
297  s->nb_samples += frame->nb_samples;
298  start_sample = FFMAX(0, start_sample);
299  end_sample = FFMIN(frame->nb_samples, end_sample);
300  if (start_sample >= end_sample || !frame->nb_samples)
301  goto drop;
302 
303  if (start_sample) {
304  AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
305  if (!out) {
307  return AVERROR(ENOMEM);
308  }
309 
311  av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
312  out->nb_samples, inlink->channels,
313  frame->format);
314  if (out->pts != AV_NOPTS_VALUE)
315  out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
316  inlink->time_base);
317 
319  frame = out;
320  } else
321  frame->nb_samples = end_sample;
322 
323  return ff_filter_frame(ctx->outputs[0], frame);
324 
325 drop:
326  s->nb_samples += frame->nb_samples;
328  return 0;
329 }
330 
331 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
332 static const AVOption atrim_options[] = {
334  { "start_sample", "Number of the first audio sample that should be "
335  "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
336  { "end_sample", "Number of the first audio sample that should be "
337  "dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
338  { NULL }
339 };
340 #undef FLAGS
341 
342 AVFILTER_DEFINE_CLASS(atrim);
343 
344 static const AVFilterPad atrim_inputs[] = {
345  {
346  .name = "default",
347  .type = AVMEDIA_TYPE_AUDIO,
348  .filter_frame = atrim_filter_frame,
349  .config_props = config_input,
350  },
351 };
352 
353 static const AVFilterPad atrim_outputs[] = {
354  {
355  .name = "default",
356  .type = AVMEDIA_TYPE_AUDIO,
357  },
358 };
359 
360 const AVFilter ff_af_atrim = {
361  .name = "atrim",
362  .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
363  .init = init,
364  .priv_size = sizeof(TrimContext),
365  .priv_class = &atrim_class,
366  FILTER_INPUTS(atrim_inputs),
367  FILTER_OUTPUTS(atrim_outputs),
368 };
369 #endif // CONFIG_ATRIM_FILTER
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:88
TrimContext::eof
int eof
Definition: trim.c:69
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1019
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
TrimContext::duration_tb
int64_t duration_tb
Definition: trim.c:65
AVOption
AVOption.
Definition: opt.h:247
TrimContext::nb_samples
int64_t nb_samples
Definition: trim.c:57
TrimContext::start_sample
int64_t start_sample
Definition: trim.c:48
FLAGS
#define FLAGS
Definition: cmdutils.c:535
mathematics.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:153
samplefmt.h
pts
static int64_t pts
Definition: transcode_aac.c:653
TrimContext::start_pts
int64_t start_pts
Definition: trim.c:47
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
av_cold
#define av_cold
Definition: attributes.h:90
COMMON_OPTS
#define COMMON_OPTS
Definition: trim.c:105
ff_avfilter_link_set_out_status
void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the destination filter.
Definition: avfilter.c:243
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
init
static av_cold int init(AVFilterContext *ctx)
Definition: trim.c:72
AV_OPT_TYPE_INT64
@ AV_OPT_TYPE_INT64
Definition: opt.h:225
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
OFFSET
#define OFFSET(x)
Definition: trim.c:104
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:152
ff_af_atrim
const AVFilter ff_af_atrim
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
TrimContext::duration
int64_t duration
Definition: trim.c:40
TrimContext::next_pts
int64_t next_pts
Definition: trim.c:67
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
TrimContext::end_frame
int64_t end_frame
Definition: trim.c:42
internal.h
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:289
av_samples_copy
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:220
log.h
common.h
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
TrimContext::end_time
int64_t end_time
Definition: trim.c:41
tb
#define tb
Definition: regdef.h:68
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
config_input
static int config_input(AVFilterLink *inlink)
Definition: trim.c:81
ff_vf_trim
const AVFilter ff_vf_trim
AVFilter
Filter definition.
Definition: avfilter.h:149
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
TrimContext::end_sample
int64_t end_sample
Definition: trim.c:48
TrimContext::start_frame
int64_t start_frame
Definition: trim.c:42
TrimContext::nb_frames
int64_t nb_frames
Definition: trim.c:53
TrimContext::start_time
int64_t start_time
Definition: trim.c:41
channel_layout.h
avfilter.h
AVFilterContext
An instance of a filter.
Definition: avfilter.h:346
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:153
TrimContext
Definition: trim.c:34
TrimContext::first_pts
int64_t first_pts
Definition: trim.c:61
TrimContext::end_pts
int64_t end_pts
Definition: trim.c:47