FFmpeg
f_segment.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include <stdint.h>

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

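/*
 * Private context shared by the segment and asegment filters.
 *
 * timestamps_str and points_str hold the raw option strings ("timestamps"
 * vs. "frames"/"samples"); use_timestamps records which form was given.
 * points holds the parsed split points (timestamps rescaled to the input
 * time base in config_input, or frame/sample counts), terminated by an
 * INT64_MAX sentinel.  current_point indexes the output pad currently being
 * fed, and last_pts remembers the most recent input pts for EOF reporting.
 */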
typedef struct SegmentContext {
    const AVClass *class;

    char *timestamps_str;
    char *points_str;
    int use_timestamps;

    int current_point;
    int nb_points;
    int64_t last_pts;

    int64_t *points;
} SegmentContext;

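/* Count the '|'-separated entries in item_str; e.g. "10|20|30" gives 3. */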
static void count_points(char *item_str, int *nb_items)
{
    char *p;

    if (!item_str)
        return;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == '|')
            (*nb_items)++;
    }
}

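/*
 * Parse the '|'-separated list into points[].  Each entry is either a time
 * value parsed with av_parse_time() (when the "timestamps" option is used)
 * or a plain integer count; a leading '+' makes an entry relative to the
 * value parsed for the preceding entry, so e.g. frames="300|+300" splits at
 * 300 and 600.
 */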
static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
{
    SegmentContext *s = ctx->priv;
    char *arg, *p = item_str;
    char *saveptr = NULL;
    int64_t ref, cur = 0;
    int ret = 0;

    for (int i = 0; i < nb_points; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            return AVERROR(EINVAL);

        p = NULL;
        ref = 0;
        if (*arg == '+') {
            ref = cur;
            arg++;
        }

        if (s->use_timestamps) {
            ret = av_parse_time(&points[i], arg, s->use_timestamps);
        } else {
            if (sscanf(arg, "%"SCNd64, &points[i]) != 1)
                ret = AVERROR(EINVAL);
        }

        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid splits supplied: %s\n", arg);
            return ret;
        }

        cur = points[i];
        points[i] += ref;
    }

    return 0;
}

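/*
 * Common init for both filters: exactly one of the "timestamps" option and
 * the count-based option must be set.  The split list is parsed, an
 * INT64_MAX sentinel point is appended, and one output pad per segment is
 * created, named "output0", "output1", ...
 */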
static av_cold int init(AVFilterContext *ctx, enum AVMediaType type)
{
    SegmentContext *s = ctx->priv;
    char *split_str;
    int ret;

    if (s->timestamps_str && s->points_str) {
        av_log(ctx, AV_LOG_ERROR, "Both timestamps and counts supplied.\n");
        return AVERROR(EINVAL);
    } else if (s->timestamps_str) {
        s->use_timestamps = 1;
        split_str = s->timestamps_str;
    } else if (s->points_str) {
        split_str = s->points_str;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Neither timestamps nor durations nor counts supplied.\n");
        return AVERROR(EINVAL);
    }

    count_points(split_str, &s->nb_points);
    s->nb_points++;

    s->points = av_calloc(s->nb_points, sizeof(*s->points));
    if (!s->points)
        return AVERROR(ENOMEM);

    ret = parse_points(ctx, split_str, s->nb_points - 1, s->points);
    if (ret < 0)
        return ret;

    s->points[s->nb_points - 1] = INT64_MAX;

    for (int i = 0; i < s->nb_points; i++) {
        AVFilterPad pad = { 0 };

        pad.type = type;
        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
            return ret;
    }

    return 0;
}

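/*
 * Timestamps are parsed in AV_TIME_BASE units (microseconds); once the
 * input link is known they are rescaled to its time base.  Frame and sample
 * counts are used as-is.
 */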
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SegmentContext *s = ctx->priv;
    AVRational tb = inlink->time_base;

    if (s->use_timestamps) {
        for (int i = 0; i < s->nb_points - 1; i++)
            s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, tb);
    }

    return 0;
}

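/*
 * Return nonzero once frame lies at or beyond the current split point:
 * by pts in timestamp mode, otherwise by the number of frames or samples
 * already output on the input link.
 */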
static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
{
    SegmentContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = 0;

    if (s->use_timestamps) {
        ret = frame->pts >= s->points[s->current_point];
    } else {
        switch (inlink->type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = inlink->frame_count_out - 1 >= s->points[s->current_point];
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = inlink->sample_count_out - frame->nb_samples >= s->points[s->current_point];
            break;
        }
    }

    return ret;
}

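/*
 * activate(): propagate status from the still-open outputs back to the
 * input, pull one frame (for audio, at most up to the next split point),
 * close finished segments by sending EOF on their pads, and forward the
 * frame on the pad of the segment it falls into.
 */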
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    SegmentContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t max_samples;
    int64_t diff;
    int64_t pts;

    for (int i = s->current_point; i < s->nb_points; i++) {
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
    }

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_inlink_consume_frame(inlink, &frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        diff = s->points[s->current_point] - inlink->sample_count_out;
        while (diff <= 0) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, s->last_pts);
            s->current_point++;
            if (s->current_point >= s->nb_points)
                return AVERROR(EINVAL);

            diff = s->points[s->current_point] - inlink->sample_count_out;
        }
        if (s->use_timestamps) {
            max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
        } else {
            max_samples = FFMAX(1, FFMIN(diff, INT_MAX));
        }
        if (max_samples <= 0 || max_samples > INT_MAX)
            ret = ff_inlink_consume_frame(inlink, &frame);
        else
            ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
        break;
    default:
        return AVERROR_BUG;
    }

    if (ret > 0) {
        s->last_pts = frame->pts;
        while (current_segment_finished(ctx, frame)) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
            s->current_point++;
        }

        if (s->current_point >= s->nb_points) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
    }

    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        for (int i = s->current_point; i < s->nb_points; i++)
            ff_outlink_set_status(ctx->outputs[i], status, pts);
        return 0;
    } else {
        for (int i = s->current_point; i < s->nb_points; i++) {
            if (ff_outlink_frame_wanted(ctx->outputs[i]))
                ff_inlink_request_frame(inlink);
        }
        return 0;
    }
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SegmentContext *s = ctx->priv;

    av_freep(&s->points);
}

#define OFFSET(x) offsetof(SegmentContext, x)
#define COMMON_OPTS \
    { "timestamps", "timestamps of input at which to split input", OFFSET(timestamps_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, \

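/* The "timestamps" option is shared by segment and asegment via COMMON_OPTS;
 * each filter adds its own count-based option ("frames" or "samples") below. */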
#if CONFIG_SEGMENT_FILTER

static av_cold int video_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_VIDEO);
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption segment_options[] = {
    COMMON_OPTS
    { "frames", "frames at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(segment);

static const AVFilterPad segment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

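/*
 * Example filtergraph usage: segment=timestamps="60|150" splits the video
 * into three segments, the first two ending at 60 s and 150 s of input
 * time, exposed on pads output0, output1 and output2.  Frame counts work
 * the same way, e.g. segment=frames="100|+100".
 */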
const AVFilter ff_vf_segment = {
    .name        = "segment",
    .description = NULL_IF_CONFIG_SMALL("Segment video stream."),
    .init        = video_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &segment_class,
    .activate    = activate,
    FILTER_INPUTS(segment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
#endif // CONFIG_SEGMENT_FILTER

#if CONFIG_ASEGMENT_FILTER

static av_cold int audio_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_AUDIO);
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asegment_options[] = {
    COMMON_OPTS
    { "samples", "samples at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(asegment);

static const AVFilterPad asegment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

const AVFilter ff_af_asegment = {
    .name        = "asegment",
    .description = NULL_IF_CONFIG_SMALL("Segment audio stream."),
    .init        = audio_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &asegment_class,
    .activate    = activate,
    FILTER_INPUTS(asegment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
#endif // CONFIG_ASEGMENT_FILTER