FFmpeg
avfilter.c
1 /*
2  * filter layer
3  * Copyright (c) 2007 Bobby Bingham
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
36 
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
39 
40 #include "audio.h"
41 #include "avfilter.h"
42 #include "filters.h"
43 #include "formats.h"
44 #include "internal.h"
45 
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
48 
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
50 {
51  av_unused char buf[16];
52  ff_tlog(ctx,
53  "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54  ref, ref->buf, ref->data[0],
55  ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56  ref->pts, ref->pkt_pos);
57 
58  if (ref->width) {
59  ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60  ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61  ref->width, ref->height,
62  !ref->interlaced_frame ? 'P' : /* Progressive */
63  ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
64  ref->key_frame,
65  av_get_picture_type_char(ref->pict_type));
66  }
67  if (ref->nb_samples) {
68  ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
69  ref->channel_layout,
70  ref->nb_samples,
71  ref->sample_rate);
72  }
73 
74  ff_tlog(ctx, "]%s", end ? "\n" : "");
75 }
76 
77 unsigned avfilter_version(void)
78 {
79  av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80  return LIBAVFILTER_VERSION_INT;
81 }
82 
83 const char *avfilter_configuration(void)
84 {
85  return FFMPEG_CONFIGURATION;
86 }
87 
88 const char *avfilter_license(void)
89 {
90 #define LICENSE_PREFIX "libavfilter license: "
91  return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
92 }
93 
94 void ff_command_queue_pop(AVFilterContext *filter)
95 {
96  AVFilterCommand *c= filter->command_queue;
97  av_freep(&c->arg);
98  av_freep(&c->command);
99  filter->command_queue= c->next;
100  av_free(c);
101 }
102 
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104  AVFilterPad **pads, AVFilterLink ***links,
105  AVFilterPad *newpad)
106 {
107  AVFilterLink **newlinks;
108  AVFilterPad *newpads;
109  unsigned i;
110 
111  idx = FFMIN(idx, *count);
112 
113  newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114  newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
115  if (newpads)
116  *pads = newpads;
117  if (newlinks)
118  *links = newlinks;
119  if (!newpads || !newlinks)
120  return AVERROR(ENOMEM);
121 
122  memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123  memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124  memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125  (*links)[idx] = NULL;
126 
127  (*count)++;
128  for (i = idx + 1; i < *count; i++)
129  if ((*links)[i])
130  (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
131 
132  return 0;
133 }
134 
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136  AVFilterContext *dst, unsigned dstpad)
137 {
138  AVFilterLink *link;
139 
140  av_assert0(src->graph);
141  av_assert0(dst->graph);
142  av_assert0(src->graph == dst->graph);
143 
144  if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145  src->outputs[srcpad] || dst->inputs[dstpad])
146  return AVERROR(EINVAL);
147 
148  if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149  av_log(src, AV_LOG_ERROR,
150  "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151  src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152  dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153  return AVERROR(EINVAL);
154  }
155 
156  link = av_mallocz(sizeof(*link));
157  if (!link)
158  return AVERROR(ENOMEM);
159 
160  src->outputs[srcpad] = dst->inputs[dstpad] = link;
161 
162  link->src = src;
163  link->dst = dst;
164  link->srcpad = &src->output_pads[srcpad];
165  link->dstpad = &dst->input_pads[dstpad];
166  link->type = src->output_pads[srcpad].type;
167  av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
168  link->format = -1;
169  ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
170 
171  return 0;
172 }
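/*
 * A minimal sketch (illustrative, not part of this file) of how avfilter_link()
 * is typically driven from the public API when assembling a graph by hand; the
 * filter names and the "buffer" argument string below are assumptions for the
 * example, and error checks are omitted for brevity:
 *
 *     AVFilterGraph *graph = avfilter_graph_alloc();
 *     AVFilterContext *src_ctx, *neg_ctx, *sink_ctx;
 *
 *     avfilter_graph_create_filter(&src_ctx, avfilter_get_by_name("buffer"), "in",
 *                                  "video_size=320x240:pix_fmt=0:time_base=1/25",
 *                                  NULL, graph);
 *     avfilter_graph_create_filter(&neg_ctx, avfilter_get_by_name("negate"), "neg",
 *                                  NULL, NULL, graph);
 *     avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("buffersink"), "out",
 *                                  NULL, NULL, graph);
 *
 *     // Pads are addressed by index; media types must match (checked above).
 *     avfilter_link(src_ctx, 0, neg_ctx, 0);
 *     avfilter_link(neg_ctx, 0, sink_ctx, 0);
 *
 *     // avfilter_graph_config() eventually runs avfilter_config_links() on
 *     // every filter in the graph.
 *     avfilter_graph_config(graph, NULL);
 */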
173 
174 void avfilter_link_free(AVFilterLink **link)
175 {
176  if (!*link)
177  return;
178 
179  av_frame_free(&(*link)->partial_buf);
180  ff_framequeue_free(&(*link)->fifo);
181  ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
182 
183  av_freep(link);
184 }
185 
186 #if FF_API_FILTER_GET_SET
187 int avfilter_link_get_channels(AVFilterLink *link)
188 {
189  return link->channels;
190 }
191 #endif
192 
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
194 {
195  filter->ready = FFMAX(filter->ready, priority);
196 }
197 
198 /**
199  * Clear frame_blocked_in on all outputs.
200  * This is necessary whenever something changes on input.
201  */
202 static void filter_unblock(AVFilterContext *filter)
203 {
204  unsigned i;
205 
206  for (i = 0; i < filter->nb_outputs; i++)
207  filter->outputs[i]->frame_blocked_in = 0;
208 }
209 
210 
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
212 {
213  if (link->status_in == status)
214  return;
215  av_assert0(!link->status_in);
216  link->status_in = status;
217  link->status_in_pts = pts;
218  link->frame_wanted_out = 0;
219  link->frame_blocked_in = 0;
220  filter_unblock(link->dst);
221  ff_filter_set_ready(link->dst, 200);
222 }
223 
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
225 {
226  av_assert0(!link->frame_wanted_out);
227  av_assert0(!link->status_out);
228  link->status_out = status;
229  if (pts != AV_NOPTS_VALUE)
230  ff_update_link_current_pts(link, pts);
231  filter_unblock(link->dst);
232  ff_filter_set_ready(link->src, 200);
233 }
234 
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
236 {
237  ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
238 }
239 
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241  unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 {
243  int ret;
244  unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
245 
246  av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247  "between the filter '%s' and the filter '%s'\n",
248  filt->name, link->src->name, link->dst->name);
249 
250  link->dst->inputs[dstpad_idx] = NULL;
251  if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252  /* failed to link output filter to new filter */
253  link->dst->inputs[dstpad_idx] = link;
254  return ret;
255  }
256 
257  /* re-hookup the link to the new destination filter we inserted */
258  link->dst = filt;
259  link->dstpad = &filt->input_pads[filt_srcpad_idx];
260  filt->inputs[filt_srcpad_idx] = link;
261 
262  /* if any information on supported media formats already exists on the
263  * link, we need to preserve that */
264  if (link->outcfg.formats)
265  ff_formats_changeref(&link->outcfg.formats,
266  &filt->outputs[filt_dstpad_idx]->outcfg.formats);
267  if (link->outcfg.samplerates)
268  ff_formats_changeref(&link->outcfg.samplerates,
269  &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
270  if (link->outcfg.channel_layouts)
271  ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
272  &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
273 
274  return 0;
275 }
276 
277 int avfilter_config_links(AVFilterContext *filter)
278 {
279  int (*config_link)(AVFilterLink *);
280  unsigned i;
281  int ret;
282 
283  for (i = 0; i < filter->nb_inputs; i ++) {
284  AVFilterLink *link = filter->inputs[i];
285  AVFilterLink *inlink;
286 
287  if (!link) continue;
288  if (!link->src || !link->dst) {
289  av_log(filter, AV_LOG_ERROR,
290  "Not all input and output are properly linked (%d).\n", i);
291  return AVERROR(EINVAL);
292  }
293 
294  inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
295  link->current_pts =
296  link->current_pts_us = AV_NOPTS_VALUE;
297 
298  switch (link->init_state) {
299  case AVLINK_INIT:
300  continue;
301  case AVLINK_STARTINIT:
302  av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303  return 0;
304  case AVLINK_UNINIT:
305  link->init_state = AVLINK_STARTINIT;
306 
307  if ((ret = avfilter_config_links(link->src)) < 0)
308  return ret;
309 
310  if (!(config_link = link->srcpad->config_props)) {
311  if (link->src->nb_inputs != 1) {
312  av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313  "with more than one input "
314  "must set config_props() "
315  "callbacks on all outputs\n");
316  return AVERROR(EINVAL);
317  }
318  } else if ((ret = config_link(link)) < 0) {
319  av_log(link->src, AV_LOG_ERROR,
320  "Failed to configure output pad on %s\n",
321  link->src->name);
322  return ret;
323  }
324 
325  switch (link->type) {
326  case AVMEDIA_TYPE_VIDEO:
327  if (!link->time_base.num && !link->time_base.den)
328  link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
329 
330  if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331  link->sample_aspect_ratio = inlink ?
332  inlink->sample_aspect_ratio : (AVRational){1,1};
333 
334  if (inlink) {
335  if (!link->frame_rate.num && !link->frame_rate.den)
336  link->frame_rate = inlink->frame_rate;
337  if (!link->w)
338  link->w = inlink->w;
339  if (!link->h)
340  link->h = inlink->h;
341  } else if (!link->w || !link->h) {
342  av_log(link->src, AV_LOG_ERROR,
343  "Video source filters must set their output link's "
344  "width and height\n");
345  return AVERROR(EINVAL);
346  }
347  break;
348 
349  case AVMEDIA_TYPE_AUDIO:
350  if (inlink) {
351  if (!link->time_base.num && !link->time_base.den)
352  link->time_base = inlink->time_base;
353  }
354 
355  if (!link->time_base.num && !link->time_base.den)
356  link->time_base = (AVRational) {1, link->sample_rate};
357  }
358 
359  if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360  !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361  av_assert0(!link->hw_frames_ctx &&
362  "should not be set by non-hwframe-aware filter");
363  link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364  if (!link->hw_frames_ctx)
365  return AVERROR(ENOMEM);
366  }
367 
368  if ((config_link = link->dstpad->config_props))
369  if ((ret = config_link(link)) < 0) {
370  av_log(link->dst, AV_LOG_ERROR,
371  "Failed to configure input pad on %s\n",
372  link->dst->name);
373  return ret;
374  }
375 
376  link->init_state = AVLINK_INIT;
377  }
378  }
379 
380  return 0;
381 }
382 
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
384 {
385  if (link->type == AVMEDIA_TYPE_VIDEO) {
386  ff_tlog(ctx,
387  "link[%p s:%dx%d fmt:%s %s->%s]%s",
388  link, link->w, link->h,
389  av_get_pix_fmt_name(link->format),
390  link->src ? link->src->filter->name : "",
391  link->dst ? link->dst->filter->name : "",
392  end ? "\n" : "");
393  } else {
394  char buf[128];
395  av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
396 
397  ff_tlog(ctx,
398  "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399  link, (int)link->sample_rate, buf,
400  av_get_sample_fmt_name(link->format),
401  link->src ? link->src->filter->name : "",
402  link->dst ? link->dst->filter->name : "",
403  end ? "\n" : "");
404  }
405 }
406 
407 int ff_request_frame(AVFilterLink *link)
408 {
409  FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
410 
411  av_assert1(!link->dst->filter->activate);
412  if (link->status_out)
413  return link->status_out;
414  if (link->status_in) {
415  if (ff_framequeue_queued_frames(&link->fifo)) {
416  av_assert1(!link->frame_wanted_out);
417  av_assert1(link->dst->ready >= 300);
418  return 0;
419  } else {
420  /* Acknowledge status change. Filters using ff_request_frame() will
421  handle the change automatically. Filters can also check the
422  status directly but none do yet. */
423  ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424  return link->status_out;
425  }
426  }
427  link->frame_wanted_out = 1;
428  ff_filter_set_ready(link->src, 100);
429  return 0;
430 }
431 
432 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
433 {
434  unsigned i;
435  int64_t r = INT64_MAX;
436 
437  for (i = 0; i < ctx->nb_inputs; i++)
438  if (ctx->inputs[i]->status_out == status)
439  r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
440  if (r < INT64_MAX)
441  return r;
442  av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443  for (i = 0; i < ctx->nb_inputs; i++)
444  r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
445  if (r < INT64_MAX)
446  return r;
447  return AV_NOPTS_VALUE;
448 }
449 
450 static int ff_request_frame_to_filter(AVFilterLink *link)
451 {
452  int ret = -1;
453 
454  FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455  /* Assume the filter is blocked, let the method clear it if not */
456  link->frame_blocked_in = 1;
457  if (link->srcpad->request_frame)
458  ret = link->srcpad->request_frame(link);
459  else if (link->src->inputs[0])
460  ret = ff_request_frame(link->src->inputs[0]);
461  if (ret < 0) {
462  if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463  ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
464  if (ret == AVERROR_EOF)
465  ret = 0;
466  }
467  return ret;
468 }
469 
470 static const char *const var_names[] = {
471  "t",
472  "n",
473  "pos",
474  "w",
475  "h",
476  NULL
477 };
478 
479 enum {
480  VAR_T,
481  VAR_N,
482  VAR_POS,
483  VAR_W,
484  VAR_H,
485  VAR_VARS_NB
486 };
487 
488 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
489 {
490  int ret;
491  char *expr_dup;
492  AVExpr *old = ctx->enable;
493 
494  if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
495  av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
496  "with filter '%s'\n", ctx->filter->name);
497  return AVERROR_PATCHWELCOME;
498  }
499 
500  expr_dup = av_strdup(expr);
501  if (!expr_dup)
502  return AVERROR(ENOMEM);
503 
504  if (!ctx->var_values) {
505  ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
506  if (!ctx->var_values) {
507  av_free(expr_dup);
508  return AVERROR(ENOMEM);
509  }
510  }
511 
512  ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
513  NULL, NULL, NULL, NULL, 0, ctx->priv);
514  if (ret < 0) {
515  av_log(ctx->priv, AV_LOG_ERROR,
516  "Error when evaluating the expression '%s' for enable\n",
517  expr_dup);
518  av_free(expr_dup);
519  return ret;
520  }
521 
522  av_expr_free(old);
523  av_free(ctx->enable_str);
524  ctx->enable_str = expr_dup;
525  return 0;
526 }
527 
528 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
529 {
530  if (pts == AV_NOPTS_VALUE)
531  return;
532  link->current_pts = pts;
533  link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
534  /* TODO use duration */
535  if (link->graph && link->age_index >= 0)
536  ff_avfilter_graph_update_heap(link->graph, link);
537 }
538 
539 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
540 {
541  if(!strcmp(cmd, "ping")){
542  char local_res[256] = {0};
543 
544  if (!res) {
545  res = local_res;
546  res_len = sizeof(local_res);
547  }
548  av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
549  if (res == local_res)
550  av_log(filter, AV_LOG_INFO, "%s", res);
551  return 0;
552  }else if(!strcmp(cmd, "enable")) {
553  return set_enable_expr(filter, arg);
554  }else if(filter->filter->process_command) {
555  return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
556  }
557  return AVERROR(ENOSYS);
558 }
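/*
 * An illustrative sketch (assumed caller code, not part of this file) of how the
 * generic commands handled above reach a filter instance at run time; "ping" and
 * "enable" are handled directly in avfilter_process_command(), anything else is
 * passed on to the filter's own process_command() callback:
 *
 *     char res[256];
 *
 *     // Answered here: appends "pong from:<filter> <instance>" to res.
 *     avfilter_process_command(filter, "ping", NULL, res, sizeof(res), 0);
 *
 *     // Routed to set_enable_expr(); assuming the filter supports timeline
 *     // editing, it is then only enabled between t=3s and t=8s (the expression
 *     // may use the t/n/pos/w/h variables defined above).
 *     avfilter_process_command(filter, "enable", "between(t,3,8)", NULL, 0, 0);
 */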
559 
560 int avfilter_pad_count(const AVFilterPad *pads)
561 {
562  int count;
563 
564  if (!pads)
565  return 0;
566 
567  for (count = 0; pads->name; count++)
568  pads++;
569  return count;
570 }
571 
572 static const char *default_filter_name(void *filter_ctx)
573 {
574  AVFilterContext *ctx = filter_ctx;
575  return ctx->name ? ctx->name : ctx->filter->name;
576 }
577 
578 static void *filter_child_next(void *obj, void *prev)
579 {
580  AVFilterContext *ctx = obj;
581  if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
582  return ctx->priv;
583  return NULL;
584 }
585 
586 #if FF_API_CHILD_CLASS_NEXT
587 static const AVClass *filter_child_class_next(const AVClass *prev)
588 {
589  void *opaque = NULL;
590  const AVFilter *f = NULL;
591 
592  /* find the filter that corresponds to prev */
593  while (prev && (f = av_filter_iterate(&opaque)))
594  if (f->priv_class == prev)
595  break;
596 
597  /* could not find filter corresponding to prev */
598  if (prev && !f)
599  return NULL;
600 
601  /* find next filter with specific options */
602  while ((f = av_filter_iterate(&opaque)))
603  if (f->priv_class)
604  return f->priv_class;
605 
606  return NULL;
607 }
608 #endif
609 
610 static const AVClass *filter_child_class_iterate(void **iter)
611 {
612  const AVFilter *f;
613 
614  while ((f = av_filter_iterate(iter)))
615  if (f->priv_class)
616  return f->priv_class;
617 
618  return NULL;
619 }
620 
621 #define OFFSET(x) offsetof(AVFilterContext, x)
622 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
623 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
624 static const AVOption avfilter_options[] = {
625  { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
626  { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
627  { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
628  { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
629  { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
630  { .i64 = 0 }, 0, INT_MAX, FLAGS },
631  { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
632  OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
633  { NULL },
634 };
635 
636 static const AVClass avfilter_class = {
637  .class_name = "AVFilter",
638  .item_name = default_filter_name,
639  .version = LIBAVUTIL_VERSION_INT,
640  .category = AV_CLASS_CATEGORY_FILTER,
641  .child_next = filter_child_next,
642 #if FF_API_CHILD_CLASS_NEXT
643  .child_class_next = filter_child_class_next,
644 #endif
645  .child_class_iterate = filter_child_class_iterate,
646  .option = avfilter_options,
647 };
648 
649 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
650  int *ret, int nb_jobs)
651 {
652  int i;
653 
654  for (i = 0; i < nb_jobs; i++) {
655  int r = func(ctx, arg, i, nb_jobs);
656  if (ret)
657  ret[i] = r;
658  }
659  return 0;
660 }
661 
662 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
663 {
664  AVFilterContext *ret;
665  int preinited = 0;
666 
667  if (!filter)
668  return NULL;
669 
670  ret = av_mallocz(sizeof(AVFilterContext));
671  if (!ret)
672  return NULL;
673 
674  ret->av_class = &avfilter_class;
675  ret->filter = filter;
676  ret->name = inst_name ? av_strdup(inst_name) : NULL;
677  if (filter->priv_size) {
678  ret->priv = av_mallocz(filter->priv_size);
679  if (!ret->priv)
680  goto err;
681  }
682  if (filter->preinit) {
683  if (filter->preinit(ret) < 0)
684  goto err;
685  preinited = 1;
686  }
687 
688  av_opt_set_defaults(ret);
689  if (filter->priv_class) {
690  *(const AVClass**)ret->priv = filter->priv_class;
691  av_opt_set_defaults(ret->priv);
692  }
693 
694  ret->internal = av_mallocz(sizeof(*ret->internal));
695  if (!ret->internal)
696  goto err;
697  ret->internal->execute = default_execute;
698 
699  ret->nb_inputs = avfilter_pad_count(filter->inputs);
700  if (ret->nb_inputs ) {
701  ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
702  if (!ret->input_pads)
703  goto err;
704  memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
705  ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
706  if (!ret->inputs)
707  goto err;
708  }
709 
710  ret->nb_outputs = avfilter_pad_count(filter->outputs);
711  if (ret->nb_outputs) {
712  ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
713  if (!ret->output_pads)
714  goto err;
715  memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
716  ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
717  if (!ret->outputs)
718  goto err;
719  }
720 
721  return ret;
722 
723 err:
724  if (preinited)
725  filter->uninit(ret);
726  av_freep(&ret->inputs);
727  av_freep(&ret->input_pads);
728  ret->nb_inputs = 0;
729  av_freep(&ret->outputs);
730  av_freep(&ret->output_pads);
731  ret->nb_outputs = 0;
732  av_freep(&ret->priv);
733  av_freep(&ret->internal);
734  av_free(ret);
735  return NULL;
736 }
737 
738 static void free_link(AVFilterLink *link)
739 {
740  if (!link)
741  return;
742 
743  if (link->src)
744  link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
745  if (link->dst)
746  link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
747 
748  av_buffer_unref(&link->hw_frames_ctx);
749 
750  ff_formats_unref(&link->incfg.formats);
751  ff_formats_unref(&link->outcfg.formats);
752  ff_formats_unref(&link->incfg.samplerates);
753  ff_formats_unref(&link->outcfg.samplerates);
754  ff_channel_layouts_unref(&link->incfg.channel_layouts);
755  ff_channel_layouts_unref(&link->outcfg.channel_layouts);
756  avfilter_link_free(&link);
757 }
758 
759 void avfilter_free(AVFilterContext *filter)
760 {
761  int i;
762 
763  if (!filter)
764  return;
765 
766  if (filter->graph)
767  ff_filter_graph_remove_filter(filter->graph, filter);
768 
769  if (filter->filter->uninit)
770  filter->filter->uninit(filter);
771 
772  for (i = 0; i < filter->nb_inputs; i++) {
773  free_link(filter->inputs[i]);
774  }
775  for (i = 0; i < filter->nb_outputs; i++) {
776  free_link(filter->outputs[i]);
777  }
778 
779  if (filter->filter->priv_class)
780  av_opt_free(filter->priv);
781 
782  av_buffer_unref(&filter->hw_device_ctx);
783 
784  av_freep(&filter->name);
785  av_freep(&filter->input_pads);
786  av_freep(&filter->output_pads);
787  av_freep(&filter->inputs);
788  av_freep(&filter->outputs);
789  av_freep(&filter->priv);
790  while(filter->command_queue){
791  ff_command_queue_pop(filter);
792  }
793  av_opt_free(filter);
794  av_expr_free(filter->enable);
795  filter->enable = NULL;
796  av_freep(&filter->var_values);
797  av_freep(&filter->internal);
798  av_free(filter);
799 }
800 
801 int ff_filter_get_nb_threads(AVFilterContext *ctx)
802 {
803  if (ctx->nb_threads > 0)
804  return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
805  return ctx->graph->nb_threads;
806 }
807 
808 static int process_options(AVFilterContext *ctx, AVDictionary **options,
809  const char *args)
810 {
811  const AVOption *o = NULL;
812  int ret, count = 0;
813  char *av_uninit(parsed_key), *av_uninit(value);
814  const char *key;
815  int offset= -1;
816 
817  if (!args)
818  return 0;
819 
820  while (*args) {
821  const char *shorthand = NULL;
822 
823  o = av_opt_next(ctx->priv, o);
824  if (o) {
825  if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
826  continue;
827  offset = o->offset;
828  shorthand = o->name;
829  }
830 
831  ret = av_opt_get_key_value(&args, "=", ":",
832  shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
833  &parsed_key, &value);
834  if (ret < 0) {
835  if (ret == AVERROR(EINVAL))
836  av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
837  else
838  av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
839  av_err2str(ret));
840  return ret;
841  }
842  if (*args)
843  args++;
844  if (parsed_key) {
845  key = parsed_key;
846  while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
847  } else {
848  key = shorthand;
849  }
850 
851  av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
852 
853  if (av_opt_find(ctx, key, NULL, 0, 0)) {
854  ret = av_opt_set(ctx, key, value, 0);
855  if (ret < 0) {
856  av_free(value);
857  av_free(parsed_key);
858  return ret;
859  }
860  } else {
861  av_dict_set(options, key, value, 0);
862  if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
863  if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
864  if (ret == AVERROR_OPTION_NOT_FOUND)
865  av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
866  av_free(value);
867  av_free(parsed_key);
868  return ret;
869  }
870  }
871  }
872 
873  av_free(value);
874  av_free(parsed_key);
875  count++;
876  }
877 
878  return count;
879 }
880 
881 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
882  const char *arg, char *res, int res_len, int flags)
883 {
884  const AVOption *o;
885 
886  if (!ctx->filter->priv_class)
887  return 0;
888  o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
889  if (!o)
890  return AVERROR(ENOSYS);
891  return av_opt_set(ctx->priv, cmd, arg, 0);
892 }
893 
894 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
895 {
896  int ret = 0;
897 
898  ret = av_opt_set_dict(ctx, options);
899  if (ret < 0) {
900  av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
901  return ret;
902  }
903 
904  if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
905  ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
906  ctx->graph->internal->thread_execute) {
907  ctx->thread_type = AVFILTER_THREAD_SLICE;
908  ctx->internal->execute = ctx->graph->internal->thread_execute;
909  } else {
910  ctx->thread_type = 0;
911  }
912 
913  if (ctx->filter->priv_class) {
914  ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
915  if (ret < 0) {
916  av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
917  return ret;
918  }
919  }
920 
921  if (ctx->filter->init_opaque)
922  ret = ctx->filter->init_opaque(ctx, NULL);
923  else if (ctx->filter->init)
924  ret = ctx->filter->init(ctx);
925  else if (ctx->filter->init_dict)
926  ret = ctx->filter->init_dict(ctx, options);
927 
928  if (ctx->enable_str) {
929  ret = set_enable_expr(ctx, ctx->enable_str);
930  if (ret < 0)
931  return ret;
932  }
933 
934  return ret;
935 }
936 
937 int avfilter_init_str(AVFilterContext *filter, const char *args)
938 {
939  AVDictionary *options = NULL;
940  AVDictionaryEntry *e;
941  int ret = 0;
942 
943  if (args && *args) {
944  if (!filter->filter->priv_class) {
945  av_log(filter, AV_LOG_ERROR, "This filter does not take any "
946  "options, but options were provided: %s.\n", args);
947  return AVERROR(EINVAL);
948  }
949 
950 #if FF_API_OLD_FILTER_OPTS_ERROR
951  if ( !strcmp(filter->filter->name, "format") ||
952  !strcmp(filter->filter->name, "noformat") ||
953  !strcmp(filter->filter->name, "frei0r") ||
954  !strcmp(filter->filter->name, "frei0r_src") ||
955  !strcmp(filter->filter->name, "ocv") ||
956  !strcmp(filter->filter->name, "pan") ||
957  !strcmp(filter->filter->name, "pp") ||
958  !strcmp(filter->filter->name, "aevalsrc")) {
959  /* a hack for compatibility with the old syntax
960  * replace colons with |s */
961  char *copy = av_strdup(args);
962  char *p = copy;
963  int nb_leading = 0; // number of leading colons to skip
964  int deprecated = 0;
965 
966  if (!copy) {
967  ret = AVERROR(ENOMEM);
968  goto fail;
969  }
970 
971  if (!strcmp(filter->filter->name, "frei0r") ||
972  !strcmp(filter->filter->name, "ocv"))
973  nb_leading = 1;
974  else if (!strcmp(filter->filter->name, "frei0r_src"))
975  nb_leading = 3;
976 
977  while (nb_leading--) {
978  p = strchr(p, ':');
979  if (!p) {
980  p = copy + strlen(copy);
981  break;
982  }
983  p++;
984  }
985 
986  deprecated = strchr(p, ':') != NULL;
987 
988  if (!strcmp(filter->filter->name, "aevalsrc")) {
989  deprecated = 0;
990  while ((p = strchr(p, ':')) && p[1] != ':') {
991  const char *epos = strchr(p + 1, '=');
992  const char *spos = strchr(p + 1, ':');
993  const int next_token_is_opt = epos && (!spos || epos < spos);
994  if (next_token_is_opt) {
995  p++;
996  break;
997  }
998  /* next token does not contain a '=', assume a channel expression */
999  deprecated = 1;
1000  *p++ = '|';
1001  }
1002  if (p && *p == ':') { // double sep '::' found
1003  deprecated = 1;
1004  memmove(p, p + 1, strlen(p));
1005  }
1006  } else
1007  while ((p = strchr(p, ':')))
1008  *p++ = '|';
1009 
1010  if (deprecated) {
1011  av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1012  "'|' to separate the list items ('%s' instead of '%s')\n",
1013  copy, args);
1014  ret = AVERROR(EINVAL);
1015  } else {
1016  ret = process_options(filter, &options, copy);
1017  }
1018  av_freep(&copy);
1019 
1020  if (ret < 0)
1021  goto fail;
1022  } else
1023 #endif
1024  {
1025  ret = process_options(filter, &options, args);
1026  if (ret < 0)
1027  goto fail;
1028  }
1029  }
1030 
1031  ret = avfilter_init_dict(filter, &options);
1032  if (ret < 0)
1033  goto fail;
1034 
1035  if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1036  av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1037  ret = AVERROR_OPTION_NOT_FOUND;
1038  goto fail;
1039  }
1040 
1041 fail:
1042  av_dict_free(&options);
1043 
1044  return ret;
1045 }
1046 
1047 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1048 {
1049  return pads[pad_idx].name;
1050 }
1051 
1052 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1053 {
1054  return pads[pad_idx].type;
1055 }
1056 
1057 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1058 {
1059  return ff_filter_frame(link->dst->outputs[0], frame);
1060 }
1061 
1062 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1063 {
1064  int (*filter_frame)(AVFilterLink *, AVFrame *);
1065  AVFilterContext *dstctx = link->dst;
1066  AVFilterPad *dst = link->dstpad;
1067  int ret;
1068 
1069  if (!(filter_frame = dst->filter_frame))
1070  filter_frame = default_filter_frame;
1071 
1072  if (dst->needs_writable) {
1073  ret = ff_inlink_make_frame_writable(link, &frame);
1074  if (ret < 0)
1075  goto fail;
1076  }
1077 
1078  ff_inlink_process_commands(link, frame);
1079  dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1080 
1081  if (dstctx->is_disabled &&
1082  (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL))
1083  filter_frame = default_filter_frame;
1084  ret = filter_frame(link, frame);
1085  link->frame_count_out++;
1086  return ret;
1087 
1088 fail:
1089  av_frame_free(&frame);
1090  return ret;
1091 }
1092 
1093 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1094 {
1095  int ret;
1096  FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1097 
1098  /* Consistency checks */
1099  if (link->type == AVMEDIA_TYPE_VIDEO) {
1100  if (strcmp(link->dst->filter->name, "buffersink") &&
1101  strcmp(link->dst->filter->name, "format") &&
1102  strcmp(link->dst->filter->name, "idet") &&
1103  strcmp(link->dst->filter->name, "null") &&
1104  strcmp(link->dst->filter->name, "scale")) {
1105  av_assert1(frame->format == link->format);
1106  av_assert1(frame->width == link->w);
1107  av_assert1(frame->height == link->h);
1108  }
1109  } else {
1110  if (frame->format != link->format) {
1111  av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1112  goto error;
1113  }
1114  if (frame->channels != link->channels) {
1115  av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1116  goto error;
1117  }
1118  if (frame->channel_layout != link->channel_layout) {
1119  av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1120  goto error;
1121  }
1122  if (frame->sample_rate != link->sample_rate) {
1123  av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1124  goto error;
1125  }
1126  }
1127 
1128  link->frame_blocked_in = link->frame_wanted_out = 0;
1129  link->frame_count_in++;
1130  filter_unblock(link->dst);
1131  ret = ff_framequeue_add(&link->fifo, frame);
1132  if (ret < 0) {
1133  av_frame_free(&frame);
1134  return ret;
1135  }
1136  ff_filter_set_ready(link->dst, 300);
1137  return 0;
1138 
1139 error:
1140  av_frame_free(&frame);
1141  return AVERROR_PATCHWELCOME;
1142 }
1143 
1144 static int samples_ready(AVFilterLink *link, unsigned min)
1145 {
1146  return ff_framequeue_queued_frames(&link->fifo) &&
1147  (ff_framequeue_queued_samples(&link->fifo) >= min ||
1148  link->status_in);
1149 }
1150 
1151 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1152  AVFrame **rframe)
1153 {
1154  AVFrame *frame0, *frame, *buf;
1155  unsigned nb_samples, nb_frames, i, p;
1156  int ret;
1157 
1158  /* Note: this function relies on no format changes and must only be
1159  called with enough samples. */
1160  av_assert1(samples_ready(link, link->min_samples));
1161  frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1162  if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1163  *rframe = ff_framequeue_take(&link->fifo);
1164  return 0;
1165  }
1166  nb_frames = 0;
1167  nb_samples = 0;
1168  while (1) {
1169  if (nb_samples + frame->nb_samples > max) {
1170  if (nb_samples < min)
1171  nb_samples = max;
1172  break;
1173  }
1174  nb_samples += frame->nb_samples;
1175  nb_frames++;
1176  if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1177  break;
1178  frame = ff_framequeue_peek(&link->fifo, nb_frames);
1179  }
1180 
1181  buf = ff_get_audio_buffer(link, nb_samples);
1182  if (!buf)
1183  return AVERROR(ENOMEM);
1184  ret = av_frame_copy_props(buf, frame0);
1185  if (ret < 0) {
1186  av_frame_free(&buf);
1187  return ret;
1188  }
1189  buf->pts = frame0->pts;
1190 
1191  p = 0;
1192  for (i = 0; i < nb_frames; i++) {
1193  frame = ff_framequeue_take(&link->fifo);
1194  av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1195  frame->nb_samples, link->channels, link->format);
1196  p += frame->nb_samples;
1197  av_frame_free(&frame);
1198  }
1199  if (p < nb_samples) {
1200  unsigned n = nb_samples - p;
1201  frame = ff_framequeue_peek(&link->fifo, 0);
1202  av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1203  link->channels, link->format);
1204  ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1205  }
1206 
1207  *rframe = buf;
1208  return 0;
1209 }
1210 
1211 static int ff_filter_frame_to_filter(AVFilterLink *link)
1212 {
1213  AVFrame *frame = NULL;
1214  AVFilterContext *dst = link->dst;
1215  int ret;
1216 
1217  av_assert1(ff_framequeue_queued_frames(&link->fifo));
1218  ret = link->min_samples ?
1219  ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1220  ff_inlink_consume_frame(link, &frame);
1221  av_assert1(ret);
1222  if (ret < 0) {
1223  av_assert1(!frame);
1224  return ret;
1225  }
1226  /* The filter will soon have received a new frame, that may allow it to
1227  produce one or more: unblock its outputs. */
1228  filter_unblock(dst);
1229  /* AVFilterPad.filter_frame() expects frame_count_out to have the value
1230  before the frame; ff_filter_frame_framed() will re-increment it. */
1231  link->frame_count_out--;
1232  ret = ff_filter_frame_framed(link, frame);
1233  if (ret < 0 && ret != link->status_out) {
1234  ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1235  } else {
1236  /* Run once again, to see if several frames were available, or if
1237  the input status has also changed, or any other reason. */
1238  ff_filter_set_ready(dst, 300);
1239  }
1240  return ret;
1241 }
1242 
1243 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1244 {
1245  unsigned out = 0, progress = 0;
1246  int ret;
1247 
1248  av_assert0(!in->status_out);
1249  if (!filter->nb_outputs) {
1250  /* not necessary with the current API and sinks */
1251  return 0;
1252  }
1253  while (!in->status_out) {
1254  if (!filter->outputs[out]->status_in) {
1255  progress++;
1256  ret = ff_request_frame_to_filter(filter->outputs[out]);
1257  if (ret < 0)
1258  return ret;
1259  }
1260  if (++out == filter->nb_outputs) {
1261  if (!progress) {
1262  /* Every output already closed: input no longer interesting
1263  (example: overlay in shortest mode, other input closed). */
1264  ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1265  return 0;
1266  }
1267  progress = 0;
1268  out = 0;
1269  }
1270  }
1271  ff_filter_set_ready(filter, 200);
1272  return 0;
1273 }
1274 
1275 static int ff_filter_activate_default(AVFilterContext *filter)
1276 {
1277  unsigned i;
1278 
1279  for (i = 0; i < filter->nb_inputs; i++) {
1280  if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1281  return ff_filter_frame_to_filter(filter->inputs[i]);
1282  }
1283  }
1284  for (i = 0; i < filter->nb_inputs; i++) {
1285  if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1286  av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1287  return forward_status_change(filter, filter->inputs[i]);
1288  }
1289  }
1290  for (i = 0; i < filter->nb_outputs; i++) {
1291  if (filter->outputs[i]->frame_wanted_out &&
1292  !filter->outputs[i]->frame_blocked_in) {
1293  return ff_request_frame_to_filter(filter->outputs[i]);
1294  }
1295  }
1296  return FFERROR_NOT_READY;
1297 }
1298 
1299 /*
1300  Filter scheduling and activation
1301 
1302  When a filter is activated, it must:
1303  - if possible, output a frame;
1304  - else, if relevant, forward the input status change;
1305  - else, check outputs for wanted frames and forward the requests.
1306 
1307  The following AVFilterLink fields are used for activation:
1308 
1309  - frame_wanted_out:
1310 
1311  This field indicates if a frame is needed on this input of the
1312  destination filter. A positive value indicates that a frame is needed
1313  to process queued frames or internal data or to satisfy the
1314  application; a zero value indicates that a frame is not especially
1315  needed but could be processed anyway; a negative value indicates that a
1316  frame would just be queued.
1317 
1318  It is set by filters using ff_request_frame() or ff_request_no_frame(),
1319  when requested by the application through a specific API or when it is
1320  set on one of the outputs.
1321 
1322  It is cleared when a frame is sent from the source using
1323  ff_filter_frame().
1324 
1325  It is also cleared when a status change is sent from the source using
1326  ff_avfilter_link_set_in_status().
1327 
1328  - frame_blocked_in:
1329 
1330  This field means that the source filter can not generate a frame as is.
1331  Its goal is to avoid repeatedly calling the request_frame() method on
1332  the same link.
1333 
1334  It is set by the framework on all outputs of a filter before activating it.
1335 
1336  It is automatically cleared by ff_filter_frame().
1337 
1338  It is also automatically cleared by ff_avfilter_link_set_in_status().
1339 
1340  It is also cleared on all outputs (using filter_unblock()) when
1341  something happens on an input: processing a frame or changing the
1342  status.
1343 
1344  - fifo:
1345 
1346  Contains the frames queued on a filter input. If it contains frames and
1347  frame_wanted_out is not set, then the filter can be activated. If that
1348  results in the filter not being able to use these frames, the filter must set
1349  frame_wanted_out to ask for more frames.
1350 
1351  - status_in and status_in_pts:
1352 
1353  Status (EOF or error code) of the link and timestamp of the status
1354  change (in link time base, same as frames) as seen from the input of
1355  the link. The status change is considered happening after the frames
1356  queued in fifo.
1357 
1358  It is set by the source filter using ff_avfilter_link_set_in_status().
1359 
1360  - status_out:
1361 
1362  Status of the link as seen from the output of the link. The status
1363  change is considered having already happened.
1364 
1365  It is set by the destination filter using
1366  ff_avfilter_link_set_out_status().
1367 
1368  Filters are activated according to the ready field, set using the
1369  ff_filter_set_ready(). Eventually, a priority queue will be used.
1370  ff_filter_set_ready() is called whenever anything could cause progress to
1371  be possible. Marking a filter ready when it is not is not a problem,
1372  except for the small overhead it causes.
1373 
1374  Conditions that cause a filter to be marked ready are:
1375 
1376  - frames added on an input link;
1377 
1378  - changes in the input or output status of an input link;
1379 
1380  - requests for a frame on an output link;
1381 
1382  - after any actual processing using the legacy methods (filter_frame(),
1383  and request_frame() to acknowledge status changes), to run once more
1384  and check if enough input was present for several frames.
1385 
1386  Examples of scenarios to consider:
1387 
1388  - buffersrc: activate if frame_wanted_out to notify the application;
1389  activate when the application adds a frame to push it immediately.
1390 
1391  - testsrc: activate only if frame_wanted_out to produce and push a frame.
1392 
1393  - concat (not at stitch points): can process a frame on any output.
1394  Activate if frame_wanted_out on output to forward on the corresponding
1395  input. Activate when a frame is present on input to process it
1396  immediately.
1397 
1398  - framesync: needs at least one frame on each input; extra frames on the
1399  wrong input will accumulate. When a frame is first added on one input,
1400  set frame_wanted_out<0 on it to avoid getting more (would trigger
1401  testsrc) and frame_wanted_out>0 on the other to allow processing it.
1402 
1403  Activation of old filters:
1404 
1405  In order to activate a filter implementing the legacy filter_frame() and
1406  request_frame() methods, perform the first possible of the following
1407  actions:
1408 
1409  - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1410  frame and call filter_frame().
1411 
1412  Rationale: filter frames as soon as possible instead of leaving them
1413  queued; frame_wanted_out < 0 is not possible since the old API does not
1414  set it nor provides any similar feedback; frame_wanted_out > 0 happens
1415  when min_samples > 0 and there are not enough samples queued.
1416 
1417  - If an input has status_in set but not status_out, try to call
1418  request_frame() on one of the outputs in the hope that it will trigger
1419  request_frame() on the input with status_in and acknowledge it. This is
1420  awkward and fragile, filters with several inputs or outputs should be
1421  updated to direct activation as soon as possible.
1422 
1423  - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1424  request_frame().
1425 
1426  Rationale: checking frame_blocked_in is necessary to avoid requesting
1427  repeatedly on a blocked input if another is not blocked (example:
1428  [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1429  */
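/*
 * A compact sketch of a hypothetical filter's activate() callback wired to the
 * inlink/outlink helpers implemented in this file, following the order described
 * above (forward status, output a frame if one is queued, otherwise forward the
 * request upstream). FF_FILTER_FORWARD_STATUS_BACK, FF_FILTER_FORWARD_STATUS and
 * FF_FILTER_FORWARD_WANTED are the filters.h convenience macros built on
 * ff_inlink_acknowledge_status(), ff_outlink_set_status() and
 * ff_outlink_frame_wanted():
 *
 *     static int activate(AVFilterContext *ctx)
 *     {
 *         AVFilterLink *inlink  = ctx->inputs[0];
 *         AVFilterLink *outlink = ctx->outputs[0];
 *         AVFrame *frame;
 *         int ret;
 *
 *         FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
 *
 *         if ((ret = ff_inlink_consume_frame(inlink, &frame)) < 0)
 *             return ret;
 *         if (ret > 0)
 *             return ff_filter_frame(outlink, frame);   // pass-through
 *
 *         FF_FILTER_FORWARD_STATUS(inlink, outlink);
 *         FF_FILTER_FORWARD_WANTED(outlink, inlink);
 *
 *         return FFERROR_NOT_READY;
 *     }
 */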
1430 
1431 int ff_filter_activate(AVFilterContext *filter)
1432 {
1433  int ret;
1434 
1435  /* Generic timeline support is not yet implemented but should be easy */
1436  av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1437  filter->filter->activate));
1438  filter->ready = 0;
1439  ret = filter->filter->activate ? filter->filter->activate(filter) :
1440  ff_filter_activate_default(filter);
1441  if (ret == FFERROR_NOT_READY)
1442  ret = 0;
1443  return ret;
1444 }
1445 
1446 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1447 {
1448  *rpts = link->current_pts;
1449  if (ff_framequeue_queued_frames(&link->fifo))
1450  return *rstatus = 0;
1451  if (link->status_out)
1452  return *rstatus = link->status_out;
1453  if (!link->status_in)
1454  return *rstatus = 0;
1455  *rstatus = link->status_out = link->status_in;
1456  ff_update_link_current_pts(link, link->status_in_pts);
1457  *rpts = link->current_pts;
1458  return 1;
1459 }
1460 
1461 size_t ff_inlink_queued_frames(AVFilterLink *link)
1462 {
1463  return ff_framequeue_queued_frames(&link->fifo);
1464 }
1465 
1466 int ff_inlink_check_available_frame(AVFilterLink *link)
1467 {
1468  return ff_framequeue_queued_frames(&link->fifo) > 0;
1469 }
1470 
1471 int ff_inlink_queued_samples(AVFilterLink *link)
1472 {
1473  return ff_framequeue_queued_samples(&link->fifo);
1474 }
1475 
1476 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1477 {
1478  uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1479  av_assert1(min);
1480  return samples >= min || (link->status_in && samples);
1481 }
1482 
1483 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1484 {
1485  ff_update_link_current_pts(link, frame->pts);
1486  ff_inlink_process_commands(link, frame);
1487  link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1488  link->frame_count_out++;
1489 }
1490 
1491 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1492 {
1493  AVFrame *frame;
1494 
1495  *rframe = NULL;
1496  if (!ff_inlink_check_available_frame(link))
1497  return 0;
1498 
1499  if (link->fifo.samples_skipped) {
1500  frame = ff_framequeue_peek(&link->fifo, 0);
1501  return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1502  }
1503 
1504  frame = ff_framequeue_take(&link->fifo);
1505  consume_update(link, frame);
1506  *rframe = frame;
1507  return 1;
1508 }
1509 
1510 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1511  AVFrame **rframe)
1512 {
1513  AVFrame *frame;
1514  int ret;
1515 
1516  av_assert1(min);
1517  *rframe = NULL;
1518  if (!ff_inlink_check_available_samples(link, min))
1519  return 0;
1520  if (link->status_in)
1521  min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1522  ret = take_samples(link, min, max, &frame);
1523  if (ret < 0)
1524  return ret;
1525  consume_update(link, frame);
1526  *rframe = frame;
1527  return 1;
1528 }
1529 
1530 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1531 {
1532  return ff_framequeue_peek(&link->fifo, idx);
1533 }
1534 
1535 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1536 {
1537  AVFrame *frame = *rframe;
1538  AVFrame *out;
1539  int ret;
1540 
1541  if (av_frame_is_writable(frame))
1542  return 0;
1543  av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1544 
1545  switch (link->type) {
1546  case AVMEDIA_TYPE_VIDEO:
1547  out = ff_get_video_buffer(link, link->w, link->h);
1548  break;
1549  case AVMEDIA_TYPE_AUDIO:
1550  out = ff_get_audio_buffer(link, frame->nb_samples);
1551  break;
1552  default:
1553  return AVERROR(EINVAL);
1554  }
1555  if (!out)
1556  return AVERROR(ENOMEM);
1557 
1558  ret = av_frame_copy_props(out, frame);
1559  if (ret < 0) {
1560  av_frame_free(&out);
1561  return ret;
1562  }
1563 
1564  switch (link->type) {
1565  case AVMEDIA_TYPE_VIDEO:
1566  av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1567  frame->format, frame->width, frame->height);
1568  break;
1569  case AVMEDIA_TYPE_AUDIO:
1570  av_samples_copy(out->extended_data, frame->extended_data,
1571  0, 0, frame->nb_samples,
1572  frame->channels,
1573  frame->format);
1574  break;
1575  default:
1576  av_assert0(!"reached");
1577  }
1578 
1579  av_frame_free(&frame);
1580  *rframe = out;
1581  return 0;
1582 }
1583 
1584 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1585 {
1586  AVFilterCommand *cmd = link->dst->command_queue;
1587 
1588  while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1589  av_log(link->dst, AV_LOG_DEBUG,
1590  "Processing command time:%f command:%s arg:%s\n",
1591  cmd->time, cmd->command, cmd->arg);
1592  avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1593  ff_command_queue_pop(link->dst);
1594  cmd= link->dst->command_queue;
1595  }
1596  return 0;
1597 }
1598 
1599 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1600 {
1601  AVFilterContext *dstctx = link->dst;
1602  int64_t pts = frame->pts;
1603  int64_t pos = frame->pkt_pos;
1604 
1605  if (!dstctx->enable_str)
1606  return 1;
1607 
1608  dstctx->var_values[VAR_N] = link->frame_count_out;
1609  dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1610  dstctx->var_values[VAR_W] = link->w;
1611  dstctx->var_values[VAR_H] = link->h;
1612  dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1613 
1614  return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1615 }
1616 
1617 void ff_inlink_request_frame(AVFilterLink *link)
1618 {
1619  av_assert1(!link->status_in);
1620  av_assert1(!link->status_out);
1621  link->frame_wanted_out = 1;
1622  ff_filter_set_ready(link->src, 100);
1623 }
1624 
1625 void ff_inlink_set_status(AVFilterLink *link, int status)
1626 {
1627  if (link->status_out)
1628  return;
1629  link->frame_wanted_out = 0;
1630  link->frame_blocked_in = 0;
1631  ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1632  while (ff_framequeue_queued_frames(&link->fifo)) {
1633  AVFrame *frame = ff_framequeue_take(&link->fifo);
1634  av_frame_free(&frame);
1635  }
1636  if (!link->status_in)
1637  link->status_in = status;
1638 }
1639 
1640 int ff_outlink_get_status(AVFilterLink *link)
1641 {
1642  return link->status_in;
1643 }
1644 
1645 const AVClass *avfilter_get_class(void)
1646 {
1647  return &avfilter_class;
1648 }
1649 
1650 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1651  int default_pool_size)
1652 {
1653  AVHWFramesContext *frames;
1654 
1655  // Must already be set by caller.
1656  av_assert0(link->hw_frames_ctx);
1657 
1658  frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1659 
1660  if (frames->initial_pool_size == 0) {
1661  // Dynamic allocation is necessarily supported.
1662  } else if (avctx->extra_hw_frames >= 0) {
1663  frames->initial_pool_size += avctx->extra_hw_frames;
1664  } else {
1665  frames->initial_pool_size = default_pool_size;
1666  }
1667 
1668  return 0;
1669 }
double * var_values
variable values for the enable expression
Definition: avfilter.h:387
#define ff_tlog(ctx,...)
Definition: internal.h:86
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1491
#define NULL
Definition: coverity.c:32
Accept to parse a value without a key; the key will then be returned as NULL.
Definition: opt.h:533
#define FF_FILTER_FLAG_HWFRAME_AWARE
The filter is aware of hardware frames, and any hardware frame context should not be automatically pr...
Definition: internal.h:339
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
Definition: avfilter.c:49
static int filter_frame(DBEDecodeContext *s, AVFrame *frame)
Definition: dolby_e.c:1049
static void copy(const float *p1, float *p2, const int length)
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
AVFilterContext * ff_filter_alloc(const AVFilter *filter, const char *inst_name)
Allocate a new filter context and return it.
Definition: avfilter.c:662
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
int thread_type
Type of multithreading allowed for filters in this graph.
Definition: avfilter.h:872
static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
Definition: avfilter.c:1057
AVOption.
Definition: opt.h:248
static int ff_filter_activate_default(AVFilterContext *filter)
Definition: avfilter.c:1275
void avfilter_free(AVFilterContext *filter)
Free a filter context.
Definition: avfilter.c:759
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:585
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref, AVFilterChannelLayouts **newref)
Definition: formats.c:527
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1358
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1052
void ff_framequeue_skip_samples(FFFrameQueue *fq, size_t samples, AVRational time_base)
Skip samples from the first frame in the queue.
Definition: framequeue.c:126
int num
Numerator.
Definition: rational.h:59
AVBufferRef * hw_device_ctx
For filters which will create hardware frames, sets the device the filter should create them in...
Definition: avfilter.h:397
enum AVMediaType type
AVFilterPad type.
Definition: internal.h:65
#define LIBAVFILTER_VERSION_INT
Definition: version.h:37
const char * key
int(* activate)(AVFilterContext *ctx)
Filter activation function.
Definition: avfilter.h:330
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
return FFERROR_NOT_READY
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
Test if enough samples are available on the link.
Definition: avfilter.c:1476
static void error(const char *err)
int thread_type
Type of multithreading being allowed/used.
Definition: avfilter.h:376
if it could not because there are no more frames
void(* uninit)(AVFilterContext *ctx)
Filter uninitialization function.
Definition: avfilter.h:259
int is_disabled
the enabled state from the last expression evaluation
Definition: avfilter.h:388
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1617
int nb_threads
Max number of threads allowed in this filter instance.
Definition: avfilter.h:404
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
#define AVFILTER_THREAD_SLICE
Process multiple parts of the frame concurrently.
Definition: avfilter.h:336
static const AVClass * filter_child_class_iterate(void **iter)
Definition: avfilter.c:610
struct AVFilterGraph * graph
filtergraph this filter belongs to
Definition: avfilter.h:358
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
int(* init_dict)(AVFilterContext *ctx, AVDictionary **options)
Should be set instead of init by the filters that want to pass a dictionary of AVOptions to nested co...
Definition: avfilter.h:247
const char * name
Pad name.
Definition: internal.h:60
int priv_size
size of private data to allocate for the filter
Definition: avfilter.h:285
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:349
char * name
name of this filter instance
Definition: avfilter.h:346
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const char * name
Definition: opt.h:249
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:352
void ff_frame_pool_uninit(FFFramePool **pool)
Deallocate the frame pool.
Definition: framepool.c:284
const char * avfilter_license(void)
Return the libavfilter license.
Definition: avfilter.c:88
unsigned ready
Ready status of the filter.
Definition: avfilter.h:411
uint8_t
AVOptions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
const struct AVOption * option
a pointer to the first option specified in the class if any or NULL
Definition: log.h:85
GLsizei GLboolean const GLfloat * value
Definition: opengl_enc.c:108
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:407
Definition: eval.c:157
int flags
A combination of AVFILTER_FLAG_*.
Definition: avfilter.h:188
void ff_command_queue_pop(AVFilterContext *filter)
Definition: avfilter.c:94
static void filter_unblock(AVFilterContext *filter)
Clear frame_blocked_in on all outputs.
Definition: avfilter.c:202
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:879
int avfilter_config_links(AVFilterContext *filter)
Negotiate the media format, dimensions, etc of all inputs to a filter.
Definition: avfilter.c:277
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:461
#define max(a, b)
Definition: cuda_runtime.h:33
int(* preinit)(AVFilterContext *ctx)
Filter pre-initialization function.
Definition: avfilter.h:211
static void free_link(AVFilterLink *link)
Definition: avfilter.c:738
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
int ff_framequeue_add(FFFrameQueue *fq, AVFrame *frame)
Add a frame.
Definition: framequeue.c:63
#define av_log(a,...)
int extra_hw_frames
Sets the number of extra hardware frames which the filter will allocate on its output links for use i...
Definition: avfilter.h:427
A filter pad used for either input or output.
Definition: internal.h:54
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
Definition: avfilter.c:528
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
Evaluate the timeline expression of the link for the time and properties of the frame.
Definition: avfilter.c:1599
static const AVClass avfilter_class
Definition: avfilter.c:636
#define src
Definition: vp8dsp.c:255
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1446
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:348
static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
Definition: avfilter.c:1243
static void consume_update(AVFilterLink *link, const AVFrame *frame)
Definition: avfilter.c:1483
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
Change a reference to a formats list from *oldref to *newref; see the Before/After diagram in formats.c.
Definition: formats.c:533
filter_frame: For filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, then the filter needs to output only at least one frame per link. The additional frames can be left buffered in the filter.
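A hedged sketch of such a filter_frame callback for a simple one-input, one-output filter; everything except the libavfilter calls (ff_filter_frame) is illustrative.

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];

    /* ... process or modify the frame in place here ... */

    /* Push the result on the output link immediately, as required above. */
    return ff_filter_frame(outlink, frame);
}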
static const char * default_filter_name(void *filter_ctx)
Definition: avfilter.c:572
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:294
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
unsigned nb_outputs
number of output pads
Definition: avfilter.h:354
unsigned avfilter_version(void)
Return the LIBAVFILTER_VERSION_INT constant.
Definition: avfilter.c:77
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options...
Definition: avfilter.c:881
const char * r
Definition: vf_curves.c:116
void * priv
private data for use by the filter
Definition: avfilter.h:356
unsigned int pos
Definition: spdifenc.c:410
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
char * enable_str
enable expression string
Definition: avfilter.h:385
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
const char * arg
Definition: jacosubdec.c:66
simple assert() macros that are a bit more flexible than ISO C assert().
#define FF_TPRINTF_START(ctx, func)
Definition: internal.h:213
const AVOption * av_opt_next(const void *obj, const AVOption *last)
Iterate over all AVOptions belonging to obj.
Definition: opt.c:45
static FilteringContext * filter_ctx
Definition: transcoding.c:47
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
GLsizei count
Definition: opengl_enc.c:108
#define FFMAX(a, b)
Definition: common.h:103
Frame pool.
Definition: framepool.c:30
#define fail()
Definition: checkasm.h:133
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:491
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1661
void ff_framequeue_free(FFFrameQueue *fq)
Free the queue and all queued frames.
Definition: framequeue.c:53
common internal API header
static void * filter_child_next(void *obj, void *prev)
Definition: avfilter.c:578
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
#define AV_OPT_FLAG_RUNTIME_PARAM
a generic parameter which can be set by the user at runtime
Definition: opt.h:293
void avfilter_link_set_closed(AVFilterLink *link, int closed)
Set the closed field of a link.
Definition: avfilter.c:235
int(* config_props)(AVFilterLink *link)
Link configuration callback.
Definition: internal.h:118
int channels
number of audio channels, only used for audio.
Definition: frame.h:620
audio channel layout utility functions
static int request_frame(AVFilterLink *outlink)
Definition: aeval.c:274
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:801
unsigned nb_inputs
number of input pads
Definition: avfilter.h:350
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:105
AVFilterChannelLayouts * channel_layouts
Lists of supported channel layouts, only for audio.
Definition: avfilter.h:455
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:560
struct AVFilterCommand * next
Definition: internal.h:43
static const AVOption avfilter_options[]
Definition: avfilter.c:624
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1471
AVFrame * ff_framequeue_take(FFFrameQueue *fq)
Take the first frame in the queue.
Definition: framequeue.c:98
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
int(* init)(AVFilterContext *ctx)
Filter initialization function.
Definition: avfilter.h:234
static int ff_request_frame_to_filter(AVFilterLink *link)
Definition: avfilter.c:450
void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the destination filter.
Definition: avfilter.c:224
int needs_writable
The filter expects writable frames from its input link, duplicating data buffers if needed...
Definition: internal.h:126
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output link, ...
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: For filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. The method must either push a frame or at least make progress towards producing one.
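A minimal sketch of such a request_frame callback for a pass-through style filter, assuming nothing is currently buffered; ff_request_frame forwards the request to the filter feeding the input.

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    /* If frames were already queued in ctx->priv, one of them would be
     * pushed with ff_filter_frame(outlink, frame) here instead. */

    /* Otherwise ask upstream for more data; this counts as progress. */
    return ff_request_frame(ctx->inputs[0]);
}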
int flags_internal
Additional flags for avfilter internal use only.
Definition: avfilter.h:287
static int ff_filter_frame_to_filter(AVFilterLink *link)
Definition: avfilter.c:1211
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
Make sure a frame is writable.
Definition: avfilter.c:1535
AVFilterFormats * samplerates
Lists of supported sample rates, only for audio.
Definition: avfilter.h:450
int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags)
Set all the options from a given dictionary on an object.
Definition: opt.c:1631
AVFrame * ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
Access a frame in the link fifo without consuming it.
Definition: avfilter.c:1530
int( avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
A function pointer passed to the AVFilterGraph::execute callback to be executed multiple times...
Definition: avfilter.h:833
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:937
if(ret)
const AVFilterPad * inputs
List of inputs, terminated by a zeroed element.
Definition: avfilter.h:165
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1645
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
Insert a filter in the middle of an existing link.
Definition: avfilter.c:240
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
const AVClass * priv_class
A class for the private data, used to declare filter private AVOptions.
Definition: avfilter.h:183
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int offset
The offset relative to the context structure where the option value is stored.
Definition: opt.h:261
void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
Definition: avfilter.c:383
int(* process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags)
Make the filter instance process a command.
Definition: avfilter.h:309
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
AVFilterGraphInternal * internal
Opaque object for libavfilter internal use.
Definition: avfilter.h:884
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1656
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1510
uint8_t * data
The data buffer.
Definition: buffer.h:89
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link)
Update the position of a link in the age heap.
void ff_channel_layouts_unref(AVFilterChannelLayouts **ref)
Remove a reference to a channel layouts list.
Definition: formats.c:509
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
Make the filter instance process a command.
Definition: avfilter.c:539
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
const char av_filter_ffversion[]
Definition: avfilter.c:47
uint8_t pi: [garbled macro-expanded sample-format conversion code (CONV_FUNC_GROUP, ff_audio_convert_alloc, ff_audio_convert) elided from this cross-reference]
int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
Process the commands queued in the link up to the time of the frame.
Definition: avfilter.c:1584
int(* func)(AVBPrint *dst, const char *in, const char *arg)
Definition: jacosubdec.c:67
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:486
Filter definition.
Definition: avfilter.h:145
static int take_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Definition: avfilter.c:1151
static const char *const var_names[]
Definition: avfilter.c:470
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1640
Rational number (pair of numerator and denominator).
Definition: rational.h:58
const AVFilter * av_filter_iterate(void **opaque)
Iterate over all registered filters.
Definition: allfilters.c:532
They must not be accessed directly. The fifo field contains the frames that are queued in the input for processing by the filter; the status_in and status_out fields contain the queued status (EOF or error) of the link.
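A small sketch of reading that queued status through the helpers instead of touching status_in/status_out directly; inlink and outlink are assumed to be the filter's links inside an activate() callback.

int status;
int64_t status_pts;

/* Check whether an EOF or error has been queued on the input link. */
if (ff_inlink_acknowledge_status(inlink, &status, &status_pts)) {
    /* Forward it to the output at the corresponding timestamp. */
    ff_outlink_set_status(outlink, status, status_pts);
    return 0;
}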
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
Definition: avfilter.c:432
void ff_inlink_set_status(AVFilterLink *link, int status)
Set the status on an input link.
Definition: avfilter.c:1625
int ff_inlink_check_available_frame(AVFilterLink *link)
Test if a frame is available on the link.
Definition: avfilter.c:1466
AVMediaType
Definition: avutil.h:199
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
refcounted data buffer API
void ff_formats_unref(AVFilterFormats **ref)
If *ref is non-NULL, remove *ref as a reference to the format list it currently points to...
Definition: formats.c:504
const char * name
Filter name.
Definition: avfilter.h:149
const char * avfilter_configuration(void)
Return the libavfilter build-time configuration.
Definition: avfilter.c:83
static uint64_t ff_framequeue_queued_samples(const FFFrameQueue *fq)
Get the number of queued samples.
Definition: framequeue.h:154
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link...
#define TFLAGS
Definition: avfilter.c:623
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
Definition: avfilter.c:1461
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:1047
#define FLAGS
Definition: avfilter.c:622
AVFrame * ff_framequeue_peek(FFFrameQueue *fq, size_t idx)
Access a frame in the queue, without removing it.
Definition: framequeue.c:115
#define LICENSE_PREFIX
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:353
int avfilter_link_get_channels(AVFilterLink *link)
Definition: avfilter.c:187
#define OFFSET(x)
Definition: avfilter.c:621
static int64_t pts
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:381
static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: avfilter.c:649
int(* filter_frame)(AVFilterLink *link, AVFrame *frame)
Filtering callback.
Definition: internal.h:93
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
int(* init_opaque)(AVFilterContext *ctx, void *opaque)
Filter initialization function, alternative to the init() callback.
Definition: avfilter.h:316
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:894
The exact code depends on how similar the blocks are and how related they are to the filter, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available in filters.h to factor that out when no extra processing is needed; a sketch using them follows.
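A minimal pass-through activate() sketch using those macros, assuming the FF_FILTER_FORWARD_* macros and FFERROR_NOT_READY from filters.h; the one-input, one-output layout is illustrative.

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame;
    int ret;

    /* Propagate an EOF/error already set on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Consume one queued frame, if any, and push it downstream. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return ff_filter_frame(outlink, frame);

    /* Propagate EOF/error from the input to the output. */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);

    /* If the output wants a frame, turn that into a request upstream. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}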
int
void av_opt_free(void *obj)
Free all allocated objects in obj.
Definition: opt.c:1611
const OptionDef options[]
Definition: ffmpeg_opt.c:3424
static int set_enable_expr(AVFilterContext *ctx, const char *expr)
Definition: avfilter.c:488
static size_t ff_framequeue_queued_frames(const FFFrameQueue *fq)
Get the number of queued frames.
Definition: framequeue.h:146
static const int8_t filt[NUMTAPS *2]
Definition: af_earwax.c:39
common internal and external API header
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
Utilities for rational number calculation.
#define AVFILTER_FLAG_SUPPORT_TIMELINE
Handy mask to test whether the filter supports the timeline feature or not (internally or generically)...
Definition: avfilter.h:139
struct AVFilterCommand * command_queue
Definition: avfilter.h:383
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:193
int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link, int default_pool_size)
Perform any additional setup required for hardware frames.
Definition: avfilter.c:1650
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poi...
Definition: opt.h:569
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
avfilter_execute_func * execute
Definition: internal.h:136
static int samples_ready(AVFilterLink *link, unsigned min)
Definition: avfilter.c:1144
void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter)
Remove a filter from a graph.
avfilter_execute_func * thread_execute
Definition: internal.h:131
#define av_free(p)
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:466
enum AVOptionType type
Definition: opt.h:262
int av_opt_get_key_value(const char **ropts, const char *key_val_sep, const char *pairs_sep, unsigned flags, char **rkey, char **rval)
Extract a key-value pair from the beginning of a string.
Definition: opt.c:1537
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
void * enable
parsed expression (AVExpr*)
Definition: avfilter.h:386
const AVClass * av_class
needed for av_log() and filters common options
Definition: avfilter.h:342
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
Definition: avfilter.c:1062
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:392
void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: avfilter.c:211
int(* request_frame)(AVFilterLink *link)
Frame request callback.
Definition: internal.h:102
An instance of a filter.
Definition: avfilter.h:341
#define av_uninit(x)
Definition: attributes.h:154
int avfilter_pad_count(const AVFilterPad *pads)
Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
Definition: avfilter.c:560
int height
Definition: frame.h:372
FILE * out
Definition: movenc.c:54
const AVFilterPad * outputs
List of outputs, terminated by a zeroed element.
Definition: avfilter.h:173
Filter: the word "frame" indicates either a video frame or a group of audio samples.
int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, AVFilterPad **pads, AVFilterLink ***links, AVFilterPad *newpad)
Insert a new pad.
Definition: avfilter.c:103
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
FFFrameQueueGlobal frame_queues
Definition: internal.h:132
#define av_malloc_array(a, b)
double time
time expressed in seconds
Definition: internal.h:39
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
const AVOption * av_opt_find2(void *obj, const char *name, const char *unit, int opt_flags, int search_flags, void **target_obj)
Look for an option in an object.
Definition: opt.c:1667
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
internal API functions
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
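A hedged query_formats sketch that exploits the shared-list behavior described above: a single list installed on all of the filter's links forces input and output to end up with the same pixel format (the two formats named here are placeholders, not a recommendation).

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    /* ff_set_common_formats attaches one shared AVFilterFormats list to
     * every input and output link of the filter. */
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}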
void ff_framequeue_init(FFFrameQueue *fq, FFFrameQueueGlobal *fqg)
Init a frame queue and attach it to a global structure.
Definition: framequeue.c:47
int ff_filter_activate(AVFilterContext *filter)
Definition: avfilter.c:1431
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:361
float min
void avfilter_link_free(AVFilterLink **link)
Free the link in *link, and set its pointer to NULL.
Definition: avfilter.c:174
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:380
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:344
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
char * command
command
Definition: internal.h:40
#define av_unused
Definition: attributes.h:131
static int process_options(AVFilterContext *ctx, AVDictionary **options, const char *args)
Definition: avfilter.c:808
simple arithmetic expression evaluator
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
char * arg
optional argument for the command
Definition: internal.h:41
#define LIBAVFILTER_VERSION_MICRO
Definition: version.h:34