FFmpeg
f_segment.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

typedef struct SegmentContext {
    const AVClass *class;

    char *timestamps_str;
    char *points_str;
    int use_timestamps;

    int current_point;
    int nb_points;

    int64_t *points;
} SegmentContext;

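/* Count the number of '|'-separated split points in item_str. */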
static void count_points(char *item_str, int *nb_items)
{
    char *p;

    if (!item_str)
        return;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == '|')
            (*nb_items)++;
    }
}

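/*
 * Parse the '|'-separated list of split points into points[]; an entry
 * prefixed with '+' is offset by the previously parsed value. Entries are
 * read either as timestamps (av_parse_time) or as plain integer counts.
 */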
static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
{
    SegmentContext *s = ctx->priv;
    char *arg, *p = item_str;
    char *saveptr = NULL;
    int64_t ref, cur = 0;
    int ret = 0;

    for (int i = 0; i < nb_points; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            return AVERROR(EINVAL);

        p = NULL;
        ref = 0;
        if (*arg == '+') {
            ref = cur;
            arg++;
        }

        if (s->use_timestamps) {
            ret = av_parse_time(&points[i], arg, s->use_timestamps);
        } else {
            if (sscanf(arg, "%"SCNd64, &points[i]) != 1)
                ret = AVERROR(EINVAL);
        }

        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid splits supplied: %s\n", arg);
            return ret;
        }

        cur = points[i];
        points[i] += ref;
    }

    return 0;
}

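/*
 * Shared init: validate the options, parse the option string into split
 * points and create one output pad per resulting segment.
 */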
static av_cold int init(AVFilterContext *ctx, enum AVMediaType type)
{
    SegmentContext *s = ctx->priv;
    char *split_str;
    int ret;

    if (s->timestamps_str && s->points_str) {
        av_log(ctx, AV_LOG_ERROR, "Both timestamps and counts supplied.\n");
        return AVERROR(EINVAL);
    } else if (s->timestamps_str) {
        s->use_timestamps = 1;
        split_str = s->timestamps_str;
    } else if (s->points_str) {
        split_str = s->points_str;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Neither timestamps nor durations nor counts supplied.\n");
        return AVERROR(EINVAL);
    }

    count_points(split_str, &s->nb_points);
    s->nb_points++;

    s->points = av_calloc(s->nb_points, sizeof(*s->points));
    if (!s->points)
        return AVERROR(ENOMEM);

    ret = parse_points(ctx, split_str, s->nb_points - 1, s->points);
    if (ret < 0)
        return ret;

    s->points[s->nb_points - 1] = INT64_MAX;

    for (int i = 0; i < s->nb_points; i++) {
        AVFilterPad pad = { 0 };

        pad.type = type;
        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
            return ret;
    }

    return 0;
}

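/* Once the input time base is known, rescale timestamp split points into it. */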
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SegmentContext *s = ctx->priv;
    AVRational tb = inlink->time_base;

    if (s->use_timestamps) {
        for (int i = 0; i < s->nb_points - 1; i++)
            s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, tb);
    }

    return 0;
}

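/*
 * Return non-zero once the current split point has been reached, judged by
 * timestamp, output frame count or output sample count.
 */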
static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
{
    SegmentContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = 0;

    if (s->use_timestamps) {
        ret = frame->pts >= s->points[s->current_point];
    } else {
        switch (inlink->type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = inlink->frame_count_out - 1 >= s->points[s->current_point];
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = inlink->sample_count_out - frame->nb_samples >= s->points[s->current_point];
            break;
        }
    }

    return ret;
}

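/*
 * Activation callback: consume one frame (or at most the samples remaining
 * up to the next split point), close finished segments by posting EOF on
 * their outputs, then forward the frame on the current segment's output.
 */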
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    SegmentContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int max_samples;
    int64_t diff;
    int64_t pts;

    for (int i = s->current_point; i < s->nb_points; i++) {
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
    }

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_inlink_consume_frame(inlink, &frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        diff = s->points[s->current_point] - inlink->sample_count_out;
        if (s->use_timestamps) {
            max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
        } else {
            max_samples = FFMAX(1, FFMIN(diff, INT_MAX));
        }
        ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
        break;
    default:
        return AVERROR_BUG;
    }

    if (ret > 0) {
        while (current_segment_finished(ctx, frame)) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
            s->current_point++;
        }

        if (s->current_point >= s->nb_points) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
    }

    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        for (int i = s->current_point; i < s->nb_points; i++)
            ff_outlink_set_status(ctx->outputs[i], status, pts);
        return 0;
    } else {
        for (int i = s->current_point; i < s->nb_points; i++) {
            if (ff_outlink_frame_wanted(ctx->outputs[i]))
                ff_inlink_request_frame(inlink);
        }
        return 0;
    }
}

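/* Release the split point array allocated in init(). */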
static av_cold void uninit(AVFilterContext *ctx)
{
    SegmentContext *s = ctx->priv;

    av_freep(&s->points);
}

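/* Option plumbing shared by the "segment" and "asegment" variants. */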
#define OFFSET(x) offsetof(SegmentContext, x)
#define COMMON_OPTS \
    { "timestamps", "timestamps of input at which to split input", OFFSET(timestamps_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, \

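/* Video variant: the "segment" filter splits on frame numbers or timestamps. */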
#if CONFIG_SEGMENT_FILTER

static av_cold int video_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_VIDEO);
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption segment_options[] = {
    COMMON_OPTS
    { "frames", "frames at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(segment);

static const AVFilterPad segment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

const AVFilter ff_vf_segment = {
    .name        = "segment",
    .description = NULL_IF_CONFIG_SMALL("Segment video stream."),
    .init        = video_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &segment_class,
    .activate    = activate,
    FILTER_INPUTS(segment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
#endif // CONFIG_SEGMENT_FILTER

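/* Audio variant: the "asegment" filter splits on sample counts or timestamps. */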
#if CONFIG_ASEGMENT_FILTER

static av_cold int audio_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_AUDIO);
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asegment_options[] = {
    COMMON_OPTS
    { "samples", "samples at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(asegment);

static const AVFilterPad asegment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

const AVFilter ff_af_asegment = {
    .name        = "asegment",
    .description = NULL_IF_CONFIG_SMALL("Segment audio stream."),
    .init        = audio_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &asegment_class,
    .activate    = activate,
    FILTER_INPUTS(asegment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
#endif // CONFIG_ASEGMENT_FILTER