FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
utils.c
Go to the documentation of this file.
1 /*
2  * various utility functions for use within FFmpeg
3  * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdarg.h>
23 #include <stdint.h>
24 
25 #include "config.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/parseutils.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/time.h"
36 #include "libavutil/timestamp.h"
37 
38 #include "libavcodec/bytestream.h"
39 #include "libavcodec/internal.h"
40 #include "libavcodec/raw.h"
41 
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "avio_internal.h"
45 #include "id3v2.h"
46 #include "internal.h"
47 #include "metadata.h"
48 #if CONFIG_NETWORK
49 #include "network.h"
50 #endif
51 #include "riff.h"
52 #include "url.h"
53 
54 #include "libavutil/ffversion.h"
/* Version string embedded in the library binary so builds can be identified
 * (e.g. by searching the object file for "FFmpeg version"). */
const char av_format_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
56 
57 /**
58  * @file
59  * various utility functions for use within FFmpeg
60  */
61 
62 unsigned avformat_version(void)
63 {
66 }
67 
/**
 * Return the build-time configuration string of this libavformat
 * (the ./configure command line, baked in as FFMPEG_CONFIGURATION).
 */
const char *avformat_configuration(void)
{
 return FFMPEG_CONFIGURATION;
}
72 
/**
 * Return the license name of this libavformat build.
 */
const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
 /* The two literals concatenate into one string; adding
  * sizeof(LICENSE_PREFIX)-1 skips the prefix (but not its NUL) so the
  * returned pointer starts at the license name and stays inside the
  * combined literal. */
 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
78 
79 #define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
80 
81 static int is_relative(int64_t ts) {
82  return ts > (RELATIVE_TS_BASE - (1LL<<48));
83 }
84 
85 /**
86  * Wrap a given time stamp, if there is an indication for an overflow
87  *
88  * @param st stream
89  * @param timestamp the time stamp to wrap
90  * @return resulting time stamp
91  */
92 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
93 {
95  st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
97  timestamp < st->pts_wrap_reference)
98  return timestamp + (1ULL << st->pts_wrap_bits);
99  else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
100  timestamp >= st->pts_wrap_reference)
101  return timestamp - (1ULL << st->pts_wrap_bits);
102  }
103  return timestamp;
104 }
105 
106 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
107 MAKE_ACCESSORS(AVStream, stream, char *, recommended_encoder_configuration)
109 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
110 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
111 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, data_codec)
112 MAKE_ACCESSORS(AVFormatContext, format, int, metadata_header_padding)
113 MAKE_ACCESSORS(AVFormatContext, format, void *, opaque)
114 MAKE_ACCESSORS(AVFormatContext, format, av_format_control_message, control_message_cb)
115 #if FF_API_OLD_OPEN_CALLBACKS
117 MAKE_ACCESSORS(AVFormatContext, format, AVOpenCallback, open_cb)
119 #endif
120 
121 int64_t av_stream_get_end_pts(const AVStream *st)
122 {
123  if (st->priv_pts) {
124  return st->priv_pts->val;
125  } else
126  return AV_NOPTS_VALUE;
127 }
128 
/**
 * Accessor: return the parser context attached to the stream (may be NULL).
 */
struct AVCodecParserContext *av_stream_get_parser(const AVStream *st)
{
 return st->parser;
}
133 
134 void av_format_inject_global_side_data(AVFormatContext *s)
135 {
136  int i;
138  for (i = 0; i < s->nb_streams; i++) {
139  AVStream *st = s->streams[i];
140  st->inject_global_side_data = 1;
141  }
142 }
143 
144 int ff_copy_whitelists(AVFormatContext *dst, AVFormatContext *src)
145 {
146  av_assert0(!dst->codec_whitelist &&
147  !dst->format_whitelist &&
148  !dst->protocol_whitelist);
149  dst-> codec_whitelist = av_strdup(src->codec_whitelist);
152  if ( (src-> codec_whitelist && !dst-> codec_whitelist)
153  || (src-> format_whitelist && !dst-> format_whitelist)
154  || (src->protocol_whitelist && !dst->protocol_whitelist)) {
155  av_log(dst, AV_LOG_ERROR, "Failed to duplicate whitelist\n");
156  return AVERROR(ENOMEM);
157  }
158  return 0;
159 }
160 
161 static const AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
162 {
163  if (st->codec->codec)
164  return st->codec->codec;
165 
166  switch (st->codec->codec_type) {
167  case AVMEDIA_TYPE_VIDEO:
168  if (s->video_codec) return s->video_codec;
169  break;
170  case AVMEDIA_TYPE_AUDIO:
171  if (s->audio_codec) return s->audio_codec;
172  break;
174  if (s->subtitle_codec) return s->subtitle_codec;
175  break;
176  }
177 
178  return avcodec_find_decoder(codec_id);
179 }
180 
/**
 * Accessor: return the format-probe score recorded when the input was
 * opened (avformat_open_input() stores init_input()'s return value here).
 */
int av_format_get_probe_score(const AVFormatContext *s)
{
 return s->probe_score;
}
185 
186 /* an arbitrarily chosen "sane" max packet size -- 50M */
187 #define SANE_CHUNK_SIZE (50000000)
188 
190 {
191  if (s->maxsize>= 0) {
192  int64_t remaining= s->maxsize - avio_tell(s);
193  if (remaining < size) {
194  int64_t newsize = avio_size(s);
195  if (!s->maxsize || s->maxsize<newsize)
196  s->maxsize = newsize - !newsize;
197  remaining= s->maxsize - avio_tell(s);
198  remaining= FFMAX(remaining, 0);
199  }
200 
201  if (s->maxsize>= 0 && remaining+1 < size) {
202  av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
203  size = remaining+1;
204  }
205  }
206  return size;
207 }
208 
209 /* Read the data in sane-sized chunks and append to pkt.
210  * Return the number of bytes read or an error. */
212 {
213  int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
214  int orig_size = pkt->size;
215  int ret;
216 
217  do {
218  int prev_size = pkt->size;
219  int read_size;
220 
221  /* When the caller requests a lot of data, limit it to the amount
222  * left in file or SANE_CHUNK_SIZE when it is not known. */
223  read_size = size;
224  if (read_size > SANE_CHUNK_SIZE/10) {
225  read_size = ffio_limit(s, read_size);
226  // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
227  if (s->maxsize < 0)
228  read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
229  }
230 
231  ret = av_grow_packet(pkt, read_size);
232  if (ret < 0)
233  break;
234 
235  ret = avio_read(s, pkt->data + prev_size, read_size);
236  if (ret != read_size) {
237  av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
238  break;
239  }
240 
241  size -= read_size;
242  } while (size > 0);
243  if (size > 0)
244  pkt->flags |= AV_PKT_FLAG_CORRUPT;
245 
246  pkt->pos = orig_pos;
247  if (!pkt->size)
248  av_packet_unref(pkt);
249  return pkt->size > orig_size ? pkt->size - orig_size : ret;
250 }
251 
253 {
254  av_init_packet(pkt);
255  pkt->data = NULL;
256  pkt->size = 0;
257  pkt->pos = avio_tell(s);
258 
259  return append_packet_chunked(s, pkt, size);
260 }
261 
263 {
264  if (!pkt->size)
265  return av_get_packet(s, pkt, size);
266  return append_packet_chunked(s, pkt, size);
267 }
268 
/**
 * Check whether the filename contains a pattern av_get_frame_filename()
 * can expand with a frame number.
 * @return 1 if expansion works, 0 otherwise (including NULL filename)
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];

    if (!filename)
        return 0;
    return av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0;
}
275 
276 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
277  AVProbeData *pd)
278 {
279  static const struct {
280  const char *name;
281  enum AVCodecID id;
282  enum AVMediaType type;
283  } fmt_id_type[] = {
294  { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
295  { 0 }
296  };
297  int score;
298  AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
299 
300  if (fmt && st->request_probe <= score) {
301  int i;
302  av_log(s, AV_LOG_DEBUG,
303  "Probe with size=%d, packets=%d detected %s with score=%d\n",
305  fmt->name, score);
306  for (i = 0; fmt_id_type[i].name; i++) {
307  if (!strcmp(fmt->name, fmt_id_type[i].name)) {
308  st->codec->codec_id = fmt_id_type[i].id;
309  st->codec->codec_type = fmt_id_type[i].type;
310  return score;
311  }
312  }
313  }
314  return 0;
315 }
316 
317 /************************************************************/
318 /* input media file */
319 
320 int av_demuxer_open(AVFormatContext *ic) {
321  int err;
322 
323  if (ic->format_whitelist && av_match_list(ic->iformat->name, ic->format_whitelist, ',') <= 0) {
324  av_log(ic, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", ic->format_whitelist);
325  return AVERROR(EINVAL);
326  }
327 
328  if (ic->iformat->read_header) {
329  err = ic->iformat->read_header(ic);
330  if (err < 0)
331  return err;
332  }
333 
334  if (ic->pb && !ic->internal->data_offset)
335  ic->internal->data_offset = avio_tell(ic->pb);
336 
337  return 0;
338 }
339 
340 /* Open input file and probe the format if necessary. */
341 static int init_input(AVFormatContext *s, const char *filename,
343 {
344  int ret;
345  AVProbeData pd = { filename, NULL, 0 };
346  int score = AVPROBE_SCORE_RETRY;
347 
348  if (s->pb) {
350  if (!s->iformat)
351  return av_probe_input_buffer2(s->pb, &s->iformat, filename,
352  s, 0, s->format_probesize);
353  else if (s->iformat->flags & AVFMT_NOFILE)
354  av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
355  "will be ignored with AVFMT_NOFILE format.\n");
356  return 0;
357  }
358 
359  if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
360  (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
361  return score;
362 
363  if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
364  return ret;
365 
366  if (s->iformat)
367  return 0;
368  return av_probe_input_buffer2(s->pb, &s->iformat, filename,
369  s, 0, s->format_probesize);
370 }
371 
372 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
373  AVPacketList **plast_pktl, int ref)
374 {
375  AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
376  int ret;
377 
378  if (!pktl)
379  return AVERROR(ENOMEM);
380 
381  if (ref) {
382  if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
383  av_free(pktl);
384  return ret;
385  }
386  } else {
387  pktl->pkt = *pkt;
388  }
389 
390  if (*packet_buffer)
391  (*plast_pktl)->next = pktl;
392  else
393  *packet_buffer = pktl;
394 
395  /* Add the packet in the buffered packet list. */
396  *plast_pktl = pktl;
397  return 0;
398 }
399 
400 int avformat_queue_attached_pictures(AVFormatContext *s)
401 {
402  int i, ret;
403  for (i = 0; i < s->nb_streams; i++)
405  s->streams[i]->discard < AVDISCARD_ALL) {
406  if (s->streams[i]->attached_pic.size <= 0) {
408  "Attached picture on stream %d has invalid size, "
409  "ignoring\n", i);
410  continue;
411  }
412 
414  &s->streams[i]->attached_pic,
416  if (ret < 0)
417  return ret;
418  }
419  return 0;
420 }
421 
422 int avformat_open_input(AVFormatContext **ps, const char *filename,
424 {
425  AVFormatContext *s = *ps;
426  int ret = 0;
427  AVDictionary *tmp = NULL;
428  ID3v2ExtraMeta *id3v2_extra_meta = NULL;
429 
430  if (!s && !(s = avformat_alloc_context()))
431  return AVERROR(ENOMEM);
432  if (!s->av_class) {
433  av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
434  return AVERROR(EINVAL);
435  }
436  if (fmt)
437  s->iformat = fmt;
438 
439  if (options)
440  av_dict_copy(&tmp, *options, 0);
441 
442  if (s->pb) // must be before any goto fail
444 
445  if ((ret = av_opt_set_dict(s, &tmp)) < 0)
446  goto fail;
447 
448  if ((ret = init_input(s, filename, &tmp)) < 0)
449  goto fail;
450  s->probe_score = ret;
451 
452  if (!s->protocol_whitelist && s->pb && s->pb->protocol_whitelist) {
454  if (!s->protocol_whitelist) {
455  ret = AVERROR(ENOMEM);
456  goto fail;
457  }
458  }
459 
460  if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
461  av_log(s, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", s->format_whitelist);
462  ret = AVERROR(EINVAL);
463  goto fail;
464  }
465 
467 
468  /* Check filename in case an image number is expected. */
469  if (s->iformat->flags & AVFMT_NEEDNUMBER) {
470  if (!av_filename_number_test(filename)) {
471  ret = AVERROR(EINVAL);
472  goto fail;
473  }
474  }
475 
477  av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
478 
479  /* Allocate private data. */
480  if (s->iformat->priv_data_size > 0) {
481  if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
482  ret = AVERROR(ENOMEM);
483  goto fail;
484  }
485  if (s->iformat->priv_class) {
486  *(const AVClass **) s->priv_data = s->iformat->priv_class;
488  if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
489  goto fail;
490  }
491  }
492 
493  /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
494  if (s->pb)
495  ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, 0);
496 
497  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
498  if ((ret = s->iformat->read_header(s)) < 0)
499  goto fail;
500 
501  if (id3v2_extra_meta) {
502  if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
503  !strcmp(s->iformat->name, "tta")) {
504  if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
505  goto fail;
506  } else
507  av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
508  }
509  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
510 
511  if ((ret = avformat_queue_attached_pictures(s)) < 0)
512  goto fail;
513 
514  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->internal->data_offset)
515  s->internal->data_offset = avio_tell(s->pb);
516 
518 
519  if (options) {
520  av_dict_free(options);
521  *options = tmp;
522  }
523  *ps = s;
524  return 0;
525 
526 fail:
527  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
528  av_dict_free(&tmp);
529  if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
530  avio_closep(&s->pb);
532  *ps = NULL;
533  return ret;
534 }
535 
536 /*******************************************************/
537 
538 static void force_codec_ids(AVFormatContext *s, AVStream *st)
539 {
540  switch (st->codec->codec_type) {
541  case AVMEDIA_TYPE_VIDEO:
542  if (s->video_codec_id)
543  st->codec->codec_id = s->video_codec_id;
544  break;
545  case AVMEDIA_TYPE_AUDIO:
546  if (s->audio_codec_id)
547  st->codec->codec_id = s->audio_codec_id;
548  break;
550  if (s->subtitle_codec_id)
551  st->codec->codec_id = s->subtitle_codec_id;
552  break;
553  }
554 }
555 
556 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
557 {
558  if (st->request_probe>0) {
559  AVProbeData *pd = &st->probe_data;
560  int end;
561  av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
562  --st->probe_packets;
563 
564  if (pkt) {
565  uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
566  if (!new_buf) {
568  "Failed to reallocate probe buffer for stream %d\n",
569  st->index);
570  goto no_packet;
571  }
572  pd->buf = new_buf;
573  memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
574  pd->buf_size += pkt->size;
575  memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
576  } else {
577 no_packet:
578  st->probe_packets = 0;
579  if (!pd->buf_size) {
581  "nothing to probe for stream %d\n", st->index);
582  }
583  }
584 
586  || st->probe_packets<= 0;
587 
588  if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
589  int score = set_codec_from_probe_data(s, st, pd);
591  || end) {
592  pd->buf_size = 0;
593  av_freep(&pd->buf);
594  st->request_probe = -1;
595  if (st->codec->codec_id != AV_CODEC_ID_NONE) {
596  av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
597  } else
598  av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
599  }
600  force_codec_ids(s, st);
601  }
602  }
603  return 0;
604 }
605 
606 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
607 {
608  int64_t ref = pkt->dts;
609  int i, pts_wrap_behavior;
610  int64_t pts_wrap_reference;
611  AVProgram *first_program;
612 
613  if (ref == AV_NOPTS_VALUE)
614  ref = pkt->pts;
615  if (st->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow)
616  return 0;
617  ref &= (1LL << st->pts_wrap_bits)-1;
618 
619  // reference time stamp should be 60 s before first time stamp
620  pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num);
621  // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
622  pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) ||
623  (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
625 
626  first_program = av_find_program_from_stream(s, NULL, stream_index);
627 
628  if (!first_program) {
629  int default_stream_index = av_find_default_stream_index(s);
630  if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
631  for (i = 0; i < s->nb_streams; i++) {
633  continue;
634  s->streams[i]->pts_wrap_reference = pts_wrap_reference;
635  s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
636  }
637  }
638  else {
639  st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
640  st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
641  }
642  }
643  else {
644  AVProgram *program = first_program;
645  while (program) {
646  if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
647  pts_wrap_reference = program->pts_wrap_reference;
648  pts_wrap_behavior = program->pts_wrap_behavior;
649  break;
650  }
651  program = av_find_program_from_stream(s, program, stream_index);
652  }
653 
654  // update every program with differing pts_wrap_reference
655  program = first_program;
656  while (program) {
657  if (program->pts_wrap_reference != pts_wrap_reference) {
658  for (i = 0; i<program->nb_stream_indexes; i++) {
659  s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
660  s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
661  }
662 
663  program->pts_wrap_reference = pts_wrap_reference;
664  program->pts_wrap_behavior = pts_wrap_behavior;
665  }
666  program = av_find_program_from_stream(s, program, stream_index);
667  }
668  }
669  return 1;
670 }
671 
672 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
673 {
674  int ret, i, err;
675  AVStream *st;
676 
677  for (;;) {
679 
680  if (pktl) {
681  *pkt = pktl->pkt;
682  st = s->streams[pkt->stream_index];
684  if ((err = probe_codec(s, st, NULL)) < 0)
685  return err;
686  if (st->request_probe <= 0) {
687  s->internal->raw_packet_buffer = pktl->next;
689  av_free(pktl);
690  return 0;
691  }
692  }
693 
694  pkt->data = NULL;
695  pkt->size = 0;
696  av_init_packet(pkt);
697  ret = s->iformat->read_packet(s, pkt);
698  if (ret < 0) {
699  /* Some demuxers return FFERROR_REDO when they consume
700  data and discard it (ignored streams, junk, extradata).
701  We must re-call the demuxer to get the real packet. */
702  if (ret == FFERROR_REDO)
703  continue;
704  if (!pktl || ret == AVERROR(EAGAIN))
705  return ret;
706  for (i = 0; i < s->nb_streams; i++) {
707  st = s->streams[i];
708  if (st->probe_packets)
709  if ((err = probe_codec(s, st, NULL)) < 0)
710  return err;
711  av_assert0(st->request_probe <= 0);
712  }
713  continue;
714  }
715 
716  if (!pkt->buf) {
717  AVPacket tmp = { 0 };
718  ret = av_packet_ref(&tmp, pkt);
719  if (ret < 0)
720  return ret;
721  *pkt = tmp;
722  }
723 
724  if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
725  (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
727  "Dropped corrupted packet (stream = %d)\n",
728  pkt->stream_index);
729  av_packet_unref(pkt);
730  continue;
731  }
732 
733  if (pkt->stream_index >= (unsigned)s->nb_streams) {
734  av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
735  continue;
736  }
737 
738  st = s->streams[pkt->stream_index];
739 
741  // correct first time stamps to negative values
742  if (!is_relative(st->first_dts))
743  st->first_dts = wrap_timestamp(st, st->first_dts);
744  if (!is_relative(st->start_time))
745  st->start_time = wrap_timestamp(st, st->start_time);
746  if (!is_relative(st->cur_dts))
747  st->cur_dts = wrap_timestamp(st, st->cur_dts);
748  }
749 
750  pkt->dts = wrap_timestamp(st, pkt->dts);
751  pkt->pts = wrap_timestamp(st, pkt->pts);
752 
753  force_codec_ids(s, st);
754 
755  /* TODO: audio: time filter; video: frame reordering (pts != dts) */
757  pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
758 
759  if (!pktl && st->request_probe <= 0)
760  return ret;
761 
762  err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
764  if (err)
765  return err;
767 
768  if ((err = probe_codec(s, st, pkt)) < 0)
769  return err;
770  }
771 }
772 
773 
774 /**********************************************************/
775 
777 {
778  if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
779  avctx->codec_id == AV_CODEC_ID_MP1 ||
780  avctx->codec_id == AV_CODEC_ID_MP2 ||
781  avctx->codec_id == AV_CODEC_ID_MP3/* ||
782  avctx->codec_id == AV_CODEC_ID_CELT*/)
783  return 1;
784  return 0;
785 }
786 
787 /**
788  * Return the frame duration in seconds. Return 0 if not available.
789  */
790 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
792 {
793  AVRational codec_framerate = s->iformat ? st->codec->framerate :
795  int frame_size;
796 
797  *pnum = 0;
798  *pden = 0;
799  switch (st->codec->codec_type) {
800  case AVMEDIA_TYPE_VIDEO:
801  if (st->r_frame_rate.num && !pc && s->iformat) {
802  *pnum = st->r_frame_rate.den;
803  *pden = st->r_frame_rate.num;
804  } else if (st->time_base.num * 1000LL > st->time_base.den) {
805  *pnum = st->time_base.num;
806  *pden = st->time_base.den;
807  } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
808  av_assert0(st->codec->ticks_per_frame);
809  av_reduce(pnum, pden,
810  codec_framerate.den,
811  codec_framerate.num * (int64_t)st->codec->ticks_per_frame,
812  INT_MAX);
813 
814  if (pc && pc->repeat_pict) {
815  av_assert0(s->iformat); // this may be wrong for interlaced encoding but its not used for that case
816  av_reduce(pnum, pden,
817  (*pnum) * (1LL + pc->repeat_pict),
818  (*pden),
819  INT_MAX);
820  }
821  /* If this codec can be interlaced or progressive then we need
822  * a parser to compute duration of a packet. Thus if we have
823  * no parser in such case leave duration undefined. */
824  if (st->codec->ticks_per_frame > 1 && !pc)
825  *pnum = *pden = 0;
826  }
827  break;
828  case AVMEDIA_TYPE_AUDIO:
829  frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
830  if (frame_size <= 0 || st->codec->sample_rate <= 0)
831  break;
832  *pnum = frame_size;
833  *pden = st->codec->sample_rate;
834  break;
835  default:
836  break;
837  }
838 }
839 
840 static int is_intra_only(AVCodecContext *enc) {
841  const AVCodecDescriptor *desc;
842 
843  if (enc->codec_type != AVMEDIA_TYPE_VIDEO)
844  return 1;
845 
846  desc = av_codec_get_codec_descriptor(enc);
847  if (!desc) {
848  desc = avcodec_descriptor_get(enc->codec_id);
850  }
851  if (desc)
852  return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
853  return 0;
854 }
855 
856 static int has_decode_delay_been_guessed(AVStream *st)
857 {
858  if (st->codec->codec_id != AV_CODEC_ID_H264) return 1;
859  if (!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
860  return 1;
861 #if CONFIG_H264_DECODER
862  if (st->codec->has_b_frames &&
864  return 1;
865 #endif
866  if (st->codec->has_b_frames<3)
867  return st->nb_decoded_frames >= 7;
868  else if (st->codec->has_b_frames<4)
869  return st->nb_decoded_frames >= 18;
870  else
871  return st->nb_decoded_frames >= 20;
872 }
873 
874 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
875 {
876  if (pktl->next)
877  return pktl->next;
878  if (pktl == s->internal->packet_buffer_end)
879  return s->internal->parse_queue;
880  return NULL;
881 }
882 
883 static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts) {
884  int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
886 
887  if(!onein_oneout) {
888  int delay = st->codec->has_b_frames;
889  int i;
890 
891  if (dts == AV_NOPTS_VALUE) {
892  int64_t best_score = INT64_MAX;
893  for (i = 0; i<delay; i++) {
894  if (st->pts_reorder_error_count[i]) {
895  int64_t score = st->pts_reorder_error[i] / st->pts_reorder_error_count[i];
896  if (score < best_score) {
897  best_score = score;
898  dts = pts_buffer[i];
899  }
900  }
901  }
902  } else {
903  for (i = 0; i<delay; i++) {
904  if (pts_buffer[i] != AV_NOPTS_VALUE) {
905  int64_t diff = FFABS(pts_buffer[i] - dts)
906  + (uint64_t)st->pts_reorder_error[i];
907  diff = FFMAX(diff, st->pts_reorder_error[i]);
908  st->pts_reorder_error[i] = diff;
909  st->pts_reorder_error_count[i]++;
910  if (st->pts_reorder_error_count[i] > 250) {
911  st->pts_reorder_error[i] >>= 1;
912  st->pts_reorder_error_count[i] >>= 1;
913  }
914  }
915  }
916  }
917  }
918 
919  if (dts == AV_NOPTS_VALUE)
920  dts = pts_buffer[0];
921 
922  return dts;
923 }
924 
925 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
926  int64_t dts, int64_t pts, AVPacket *pkt)
927 {
928  AVStream *st = s->streams[stream_index];
930  int64_t pts_buffer[MAX_REORDER_DELAY+1];
931  uint64_t shift;
932  int i, delay;
933 
934  if (st->first_dts != AV_NOPTS_VALUE ||
935  dts == AV_NOPTS_VALUE ||
936  st->cur_dts == AV_NOPTS_VALUE ||
937  is_relative(dts))
938  return;
939 
940  delay = st->codec->has_b_frames;
941  st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE);
942  st->cur_dts = dts;
943  shift = (uint64_t)st->first_dts - RELATIVE_TS_BASE;
944 
945  for (i = 0; i<MAX_REORDER_DELAY+1; i++)
946  pts_buffer[i] = AV_NOPTS_VALUE;
947 
948  if (is_relative(pts))
949  pts += shift;
950 
951  for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
952  if (pktl->pkt.stream_index != stream_index)
953  continue;
954  if (is_relative(pktl->pkt.pts))
955  pktl->pkt.pts += shift;
956 
957  if (is_relative(pktl->pkt.dts))
958  pktl->pkt.dts += shift;
959 
960  if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
961  st->start_time = pktl->pkt.pts;
962 
963  if (pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
964  pts_buffer[0] = pktl->pkt.pts;
965  for (i = 0; i<delay && pts_buffer[i] > pts_buffer[i + 1]; i++)
966  FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]);
967 
968  pktl->pkt.dts = select_from_pts_buffer(st, pts_buffer, pktl->pkt.dts);
969  }
970  }
971 
972  if (st->start_time == AV_NOPTS_VALUE)
973  st->start_time = pts;
974 }
975 
976 static void update_initial_durations(AVFormatContext *s, AVStream *st,
977  int stream_index, int duration)
978 {
980  int64_t cur_dts = RELATIVE_TS_BASE;
981 
982  if (st->first_dts != AV_NOPTS_VALUE) {
984  return;
986  cur_dts = st->first_dts;
987  for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
988  if (pktl->pkt.stream_index == stream_index) {
989  if (pktl->pkt.pts != pktl->pkt.dts ||
990  pktl->pkt.dts != AV_NOPTS_VALUE ||
991  pktl->pkt.duration)
992  break;
993  cur_dts -= duration;
994  }
995  }
996  if (pktl && pktl->pkt.dts != st->first_dts) {
997  av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %"PRId64") in the queue\n",
998  av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
999  return;
1000  }
1001  if (!pktl) {
1002  av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1003  return;
1004  }
1006  st->first_dts = cur_dts;
1007  } else if (st->cur_dts != RELATIVE_TS_BASE)
1008  return;
1009 
1010  for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
1011  if (pktl->pkt.stream_index != stream_index)
1012  continue;
1013  if (pktl->pkt.pts == pktl->pkt.dts &&
1014  (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) &&
1015  !pktl->pkt.duration) {
1016  pktl->pkt.dts = cur_dts;
1017  if (!st->codec->has_b_frames)
1018  pktl->pkt.pts = cur_dts;
1019 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1020  pktl->pkt.duration = duration;
1021  } else
1022  break;
1023  cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1024  }
1025  if (!pktl)
1026  st->cur_dts = cur_dts;
1027 }
1028 
1029 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1031  int64_t next_dts, int64_t next_pts)
1032 {
1033  int num, den, presentation_delayed, delay, i;
1034  int64_t offset;
1036  int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
1038 
1039  if (s->flags & AVFMT_FLAG_NOFILLIN)
1040  return;
1041 
1042  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
1043  if (pkt->dts == pkt->pts && st->last_dts_for_order_check != AV_NOPTS_VALUE) {
1044  if (st->last_dts_for_order_check <= pkt->dts) {
1045  st->dts_ordered++;
1046  } else {
1048  "DTS %"PRIi64" < %"PRIi64" out of order\n",
1049  pkt->dts,
1051  st->dts_misordered++;
1052  }
1053  if (st->dts_ordered + st->dts_misordered > 250) {
1054  st->dts_ordered >>= 1;
1055  st->dts_misordered >>= 1;
1056  }
1057  }
1058 
1059  st->last_dts_for_order_check = pkt->dts;
1060  if (st->dts_ordered < 8*st->dts_misordered && pkt->dts == pkt->pts)
1061  pkt->dts = AV_NOPTS_VALUE;
1062  }
1063 
1064  if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1065  pkt->dts = AV_NOPTS_VALUE;
1066 
1067  if (pc && pc->pict_type == AV_PICTURE_TYPE_B
1068  && !st->codec->has_b_frames)
1069  //FIXME Set low_delay = 0 when has_b_frames = 1
1070  st->codec->has_b_frames = 1;
1071 
1072  /* do we have a video B-frame ? */
1073  delay = st->codec->has_b_frames;
1074  presentation_delayed = 0;
1075 
1076  /* XXX: need has_b_frame, but cannot get it if the codec is
1077  * not initialized */
1078  if (delay &&
1079  pc && pc->pict_type != AV_PICTURE_TYPE_B)
1080  presentation_delayed = 1;
1081 
1082  if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1083  st->pts_wrap_bits < 63 &&
1084  pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1085  if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
1086  pkt->dts -= 1LL << st->pts_wrap_bits;
1087  } else
1088  pkt->pts += 1LL << st->pts_wrap_bits;
1089  }
1090 
1091  /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
1092  * We take the conservative approach and discard both.
1093  * Note: If this is misbehaving for an H.264 file, then possibly
1094  * presentation_delayed is not set correctly. */
1095  if (delay == 1 && pkt->dts == pkt->pts &&
1096  pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
1097  av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1098  if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
1099  && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1100  pkt->dts = AV_NOPTS_VALUE;
1101  }
1102 
1103  duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
1104  if (pkt->duration == 0) {
1105  ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
1106  if (den && num) {
1107  duration = (AVRational) {num, den};
1108  pkt->duration = av_rescale_rnd(1,
1109  num * (int64_t) st->time_base.den,
1110  den * (int64_t) st->time_base.num,
1111  AV_ROUND_DOWN);
1112  }
1113  }
1114 
1115  if (pkt->duration != 0 && (s->internal->packet_buffer || s->internal->parse_queue))
1116  update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1117 
1118  /* Correct timestamps with byte offset if demuxers only have timestamps
1119  * on packet boundaries */
1120  if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
1121  /* this will estimate bitrate based on this frame's duration and size */
1122  offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1123  if (pkt->pts != AV_NOPTS_VALUE)
1124  pkt->pts += offset;
1125  if (pkt->dts != AV_NOPTS_VALUE)
1126  pkt->dts += offset;
1127  }
1128 
1129  /* This may be redundant, but it should not hurt. */
1130  if (pkt->dts != AV_NOPTS_VALUE &&
1131  pkt->pts != AV_NOPTS_VALUE &&
1132  pkt->pts > pkt->dts)
1133  presentation_delayed = 1;
1134 
1135  if (s->debug & FF_FDEBUG_TS)
1136  av_log(s, AV_LOG_TRACE,
1137  "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%"PRId64" delay:%d onein_oneout:%d\n",
1138  presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
1139  pkt->stream_index, pc, pkt->duration, delay, onein_oneout);
1140 
1141  /* Interpolate PTS and DTS if they are not present. We skip H264
1142  * currently because delay and has_b_frames are not reliably set. */
1143  if ((delay == 0 || (delay == 1 && pc)) &&
1144  onein_oneout) {
1145  if (presentation_delayed) {
1146  /* DTS = decompression timestamp */
1147  /* PTS = presentation timestamp */
1148  if (pkt->dts == AV_NOPTS_VALUE)
1149  pkt->dts = st->last_IP_pts;
1150  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1151  if (pkt->dts == AV_NOPTS_VALUE)
1152  pkt->dts = st->cur_dts;
1153 
1154  /* This is tricky: the dts must be incremented by the duration
1155  * of the frame we are displaying, i.e. the last I- or P-frame. */
1156  if (st->last_IP_duration == 0)
1157  st->last_IP_duration = pkt->duration;
1158  if (pkt->dts != AV_NOPTS_VALUE)
1159  st->cur_dts = pkt->dts + st->last_IP_duration;
1160  if (pkt->dts != AV_NOPTS_VALUE &&
1161  pkt->pts == AV_NOPTS_VALUE &&
1162  st->last_IP_duration > 0 &&
1163  ((uint64_t)st->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
1164  next_dts != next_pts &&
1165  next_pts != AV_NOPTS_VALUE)
1166  pkt->pts = next_dts;
1167 
1168  st->last_IP_duration = pkt->duration;
1169  st->last_IP_pts = pkt->pts;
1170  /* Cannot compute PTS if not present (we can compute it only
1171  * by knowing the future. */
1172  } else if (pkt->pts != AV_NOPTS_VALUE ||
1173  pkt->dts != AV_NOPTS_VALUE ||
1174  pkt->duration ) {
1175 
1176  /* presentation is not delayed : PTS and DTS are the same */
1177  if (pkt->pts == AV_NOPTS_VALUE)
1178  pkt->pts = pkt->dts;
1180  pkt->pts, pkt);
1181  if (pkt->pts == AV_NOPTS_VALUE)
1182  pkt->pts = st->cur_dts;
1183  pkt->dts = pkt->pts;
1184  if (pkt->pts != AV_NOPTS_VALUE)
1185  st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
1186  }
1187  }
1188 
1189  if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
1190  st->pts_buffer[0] = pkt->pts;
1191  for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
1192  FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
1193 
1194  pkt->dts = select_from_pts_buffer(st, st->pts_buffer, pkt->dts);
1195  }
1196  // We skipped it above so we try here.
1197  if (!onein_oneout)
1198  // This should happen on the first packet
1199  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1200  if (pkt->dts > st->cur_dts)
1201  st->cur_dts = pkt->dts;
1202 
1203  if (s->debug & FF_FDEBUG_TS)
1204  av_log(s, AV_LOG_TRACE, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1205  presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1206 
1207  /* update flags */
1208  if (is_intra_only(st->codec))
1209  pkt->flags |= AV_PKT_FLAG_KEY;
1210 #if FF_API_CONVERGENCE_DURATION
1212  if (pc)
1215 #endif
1216 }
1217 
1218 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1219 {
1220  while (*pkt_buf) {
1221  AVPacketList *pktl = *pkt_buf;
1222  *pkt_buf = pktl->next;
1223  av_packet_unref(&pktl->pkt);
1224  av_freep(&pktl);
1225  }
1226  *pkt_buf_end = NULL;
1227 }
1228 
1229 /**
1230  * Parse a packet, add all split parts to parse_queue.
1231  *
1232  * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
1233  */
 /* NOTE(review): this excerpt elides several original lines (gaps in the
  * embedded numbering, e.g. 1243, 1248, 1281, 1284, 1296, 1301, 1323);
  * the comments below describe only the code that is visible here. */
1234 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1235 {
1236  AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1237  AVStream *st = s->streams[stream_index];
1238  uint8_t *data = pkt ? pkt->data : NULL;
1239  int size = pkt ? pkt->size : 0;
1240  int ret = 0, got_output = 0;
1241 
 /* A NULL input packet means "flush": feed the parser an empty packet and
  * keep pulling frames while it still produces output. */
1242  if (!pkt) {
1244  pkt = &flush_pkt;
1245  got_output = 1;
1246  } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1247  // preserve 0-size sync packets
1249  }
1250 
1251  while (size > 0 || (pkt == &flush_pkt && got_output)) {
1252  int len;
 /* Timestamps of the *next* input packet, handed to compute_pkt_fields()
  * below so it can fill in missing pts/dts values. */
1253  int64_t next_pts = pkt->pts;
1254  int64_t next_dts = pkt->dts;
1255 
1256  av_init_packet(&out_pkt);
1257  len = av_parser_parse2(st->parser, st->codec,
1258  &out_pkt.data, &out_pkt.size, data, size,
1259  pkt->pts, pkt->dts, pkt->pos);
1260 
 /* The input timestamps apply only to the first frame parsed from this
  * packet; clear them so later frames do not reuse them. */
1261  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1262  pkt->pos = -1;
1263  /* increment read pointer */
1264  data += len;
1265  size -= len;
1266 
1267  got_output = !!out_pkt.size;
1268 
1269  if (!out_pkt.size)
1270  continue;
1271 
 /* Move side data from the consumed input packet to the parsed output. */
1272  if (pkt->side_data) {
1273  out_pkt.side_data = pkt->side_data;
1274  out_pkt.side_data_elems = pkt->side_data_elems;
1275  pkt->side_data = NULL;
1276  pkt->side_data_elems = 0;
1277  }
1278 
1279  /* set the duration */
1280  out_pkt.duration = (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
1282  if (st->codec->sample_rate > 0) {
1283  out_pkt.duration =
1285  (AVRational) { 1, st->codec->sample_rate },
1286  st->time_base,
1287  AV_ROUND_DOWN);
1288  }
1289  }
1290 
1291  out_pkt.stream_index = st->index;
1292  out_pkt.pts = st->parser->pts;
1293  out_pkt.dts = st->parser->dts;
1294  out_pkt.pos = st->parser->pos;
1295 
1297  out_pkt.pos = st->parser->frame_offset;
1298 
 /* Keyframe flag: trust the parser when it is certain (1); when it is
  * undecided (-1), fall back to picture type / the input packet's flag. */
1299  if (st->parser->key_frame == 1 ||
1300  (st->parser->key_frame == -1 &&
1302  out_pkt.flags |= AV_PKT_FLAG_KEY;
1303 
1304  if (st->parser->key_frame == -1 && st->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1305  out_pkt.flags |= AV_PKT_FLAG_KEY;
1306 
1307  compute_pkt_fields(s, st, st->parser, &out_pkt, next_dts, next_pts);
1308 
 /* Queue the parsed frame; add_to_pktbuf takes its own reference. */
1309  ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
1310  &s->internal->parse_queue_end, 1);
1311  av_packet_unref(&out_pkt);
1312  if (ret < 0)
1313  goto fail;
1314  }
1315 
1316  /* end of the stream => close and free the parser */
1317  if (pkt == &flush_pkt) {
1318  av_parser_close(st->parser);
1319  st->parser = NULL;
1320  }
1321 
1322 fail:
1324  return ret;
1325 }
1326 
1327 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1328  AVPacketList **pkt_buffer_end,
1329  AVPacket *pkt)
1330 {
1331  AVPacketList *pktl;
1332  av_assert0(*pkt_buffer);
1333  pktl = *pkt_buffer;
1334  *pkt = pktl->pkt;
1335  *pkt_buffer = pktl->next;
1336  if (!pktl->next)
1337  *pkt_buffer_end = NULL;
1338  av_freep(&pktl);
1339  return 0;
1340 }
1341 
1342 static int64_t ts_to_samples(AVStream *st, int64_t ts)
1343 {
1344  return av_rescale(ts, st->time_base.num * st->codec->sample_rate, st->time_base.den);
1345 }
1346 
 /* Read one demuxed (and, when needed, parsed) packet into *pkt.
  * Pulls raw packets via ff_read_packet(), runs them through the stream's
  * parser when required, then applies skip/discard-sample side data and
  * global side-data injection before returning.
  *
  * NOTE(review): this excerpt elides a number of original lines (gaps in
  * the embedded numbering, e.g. 1379, 1399, 1401, 1403, 1407, 1413, 1440,
  * 1457, 1486, 1491, 1494); comments describe only the visible code. */
1347 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1348 {
1349  int ret = 0, i, got_packet = 0;
1350  AVDictionary *metadata = NULL;
1351 
1352  av_init_packet(pkt);
1353 
 /* Loop until we either have a packet or the parse queue holds output. */
1354  while (!got_packet && !s->internal->parse_queue) {
1355  AVStream *st;
1356  AVPacket cur_pkt;
1357 
1358  /* read next packet */
1359  ret = ff_read_packet(s, &cur_pkt);
1360  if (ret < 0) {
1361  if (ret == AVERROR(EAGAIN))
1362  return ret;
1363  /* flush the parsers */
1364  for (i = 0; i < s->nb_streams; i++) {
1365  st = s->streams[i];
1366  if (st->parser && st->need_parsing)
1367  parse_packet(s, NULL, st->index);
1368  }
1369  /* all remaining packets are now in parse_queue =>
1370  * really terminate parsing */
1371  break;
1372  }
1373  ret = 0;
1374  st = s->streams[cur_pkt.stream_index];
1375 
 /* pts < dts is invalid; the (elided) av_log head reported it. */
1376  if (cur_pkt.pts != AV_NOPTS_VALUE &&
1377  cur_pkt.dts != AV_NOPTS_VALUE &&
1378  cur_pkt.pts < cur_pkt.dts) {
1380  "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1381  cur_pkt.stream_index,
1382  av_ts2str(cur_pkt.pts),
1383  av_ts2str(cur_pkt.dts),
1384  cur_pkt.size);
1385  }
1386  if (s->debug & FF_FDEBUG_TS)
1387  av_log(s, AV_LOG_DEBUG,
1388  "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
1389  cur_pkt.stream_index,
1390  av_ts2str(cur_pkt.pts),
1391  av_ts2str(cur_pkt.dts),
1392  cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1393 
 /* Lazily create the parser the first time this stream needs one. */
1394  if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1395  st->parser = av_parser_init(st->codec->codec_id);
1396  if (!st->parser) {
1397  av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1398  "%s, packets or times may be invalid.\n",
1400  /* no parser available: just output the raw packets */
1402  } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1404  else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1405  st->parser->flags |= PARSER_FLAG_ONCE;
1406  else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1408  }
1409 
1410  if (!st->need_parsing || !st->parser) {
1411  /* no parsing needed: we just output the packet as is */
1412  *pkt = cur_pkt;
1414  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1415  (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1416  ff_reduce_index(s, st->index);
1417  av_add_index_entry(st, pkt->pos, pkt->dts,
1418  0, 0, AVINDEX_KEYFRAME);
1419  }
1420  got_packet = 1;
1421  } else if (st->discard < AVDISCARD_ALL) {
1422  if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1423  return ret;
1424  } else {
1425  /* free packet */
1426  av_packet_unref(&cur_pkt);
1427  }
 /* Honor skip_to_keyframe: drop packets until a keyframe arrives. */
1428  if (pkt->flags & AV_PKT_FLAG_KEY)
1429  st->skip_to_keyframe = 0;
1430  if (st->skip_to_keyframe) {
1431  av_packet_unref(&cur_pkt);
1432  if (got_packet) {
1433  *pkt = cur_pkt;
1434  }
1435  got_packet = 0;
1436  }
1437  }
1438 
 /* (Elided line 1440 presumably drained the parse queue into *pkt.) */
1439  if (!got_packet && s->internal->parse_queue)
1441 
1442  if (ret >= 0) {
1443  AVStream *st = s->streams[pkt->stream_index];
1444  int discard_padding = 0;
 /* Compute how many trailing samples of this packet fall inside the
  * stream's discard window. */
1445  if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
1446  int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
1447  int64_t sample = ts_to_samples(st, pts);
1448  int duration = ts_to_samples(st, pkt->duration);
1449  int64_t end_sample = sample + duration;
1450  if (duration > 0 && end_sample >= st->first_discard_sample &&
1451  sample < st->last_discard_sample)
1452  discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
1453  }
1454  if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
1455  st->skip_samples = st->start_skip_samples;
 /* Attach skip/discard counts as (elided) packet side data; p points
  * at the side-data buffer allocated on the elided line 1457. */
1456  if (st->skip_samples || discard_padding) {
1458  if (p) {
1459  AV_WL32(p, st->skip_samples);
1460  AV_WL32(p + 4, discard_padding);
1461  av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
1462  }
1463  st->skip_samples = 0;
1464  }
1465 
 /* Copy the stream's global side data into the first packet(s). */
1466  if (st->inject_global_side_data) {
1467  for (i = 0; i < st->nb_side_data; i++) {
1468  AVPacketSideData *src_sd = &st->side_data[i];
1469  uint8_t *dst_data;
1470 
1471  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
1472  continue;
1473 
1474  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
1475  if (!dst_data) {
1476  av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
1477  continue;
1478  }
1479 
1480  memcpy(dst_data, src_sd->data, src_sd->size);
1481  }
1482  st->inject_global_side_data = 0;
1483  }
1484 
1485  if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
1487  }
1488 
 /* Merge any metadata updates published by child contexts. */
1489  av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1490  if (metadata) {
1492  av_dict_copy(&s->metadata, metadata, 0);
1493  av_dict_free(&metadata);
1495  }
1496 
1497  if (s->debug & FF_FDEBUG_TS)
1498  av_log(s, AV_LOG_DEBUG,
1499  "read_frame_internal stream=%d, pts=%s, dts=%s, "
1500  "size=%d, duration=%"PRId64", flags=%d\n",
1501  pkt->stream_index,
1502  av_ts2str(pkt->pts),
1503  av_ts2str(pkt->dts),
1504  pkt->size, pkt->duration, pkt->flags);
1505 
1506  return ret;
1507 }
1508 
 /* Public entry point: return the next frame of the stream in *pkt.
  * Without AVFMT_FLAG_GENPTS this is a thin wrapper over the packet buffer
  * and read_frame_internal(); with GENPTS it buffers packets and scans
  * ahead to synthesize missing pts values from later dts values.
  *
  * NOTE(review): this excerpt elides original lines 1518 and 1566 (the
  * read_from_packet_buffer() call heads); comments describe visible code. */
1509 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1510 {
1511  const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1512  int eof = 0;
1513  int ret;
1514  AVStream *st;
1515 
 /* Fast path: no pts generation requested. */
1516  if (!genpts) {
1517  ret = s->internal->packet_buffer
1519  &s->internal->packet_buffer_end, pkt)
1520  : read_frame_internal(s, pkt);
1521  if (ret < 0)
1522  return ret;
1523  goto return_packet;
1524  }
1525 
1526  for (;;) {
1527  AVPacketList *pktl = s->internal->packet_buffer;
1528 
1529  if (pktl) {
1530  AVPacket *next_pkt = &pktl->pkt;
1531 
 /* Scan buffered packets of the same stream for a later dts that can
  * serve as this packet's pts (wrap-aware comparison). */
1532  if (next_pkt->dts != AV_NOPTS_VALUE) {
1533  int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1534  // last dts seen for this stream. if any of packets following
1535  // current one had no dts, we will set this to AV_NOPTS_VALUE.
1536  int64_t last_dts = next_pkt->dts;
1537  while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1538  if (pktl->pkt.stream_index == next_pkt->stream_index &&
1539  (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1540  if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1541  // not B-frame
1542  next_pkt->pts = pktl->pkt.dts;
1543  }
1544  if (last_dts != AV_NOPTS_VALUE) {
1545  // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1546  last_dts = pktl->pkt.dts;
1547  }
1548  }
1549  pktl = pktl->next;
1550  }
1551  if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1552  // Fixing the last reference frame had none pts issue (For MXF etc).
1553  // We only do this when
1554  // 1. eof.
1555  // 2. we are not able to resolve a pts value for current packet.
1556  // 3. the packets for this stream at the end of the files had valid dts.
1557  next_pkt->pts = last_dts + next_pkt->duration;
1558  }
1559  pktl = s->internal->packet_buffer;
1560  }
1561 
1562  /* read packet from packet buffer, if there is data */
1563  st = s->streams[next_pkt->stream_index];
1564  if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
1565  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1567  &s->internal->packet_buffer_end, pkt);
1568  goto return_packet;
1569  }
1570  }
1571 
 /* Buffer empty or head not yet resolvable: fetch another packet. */
1572  ret = read_frame_internal(s, pkt);
1573  if (ret < 0) {
1574  if (pktl && ret != AVERROR(EAGAIN)) {
1575  eof = 1;
1576  continue;
1577  } else
1578  return ret;
1579  }
1580 
1581  ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1582  &s->internal->packet_buffer_end, 1);
1583  av_packet_unref(pkt);
1584  if (ret < 0)
1585  return ret;
1586  }
1587 
1588 return_packet:
1589 
 /* Maintain the generic index and strip the relative-timestamp offset
  * before handing the packet to the user. */
1590  st = s->streams[pkt->stream_index];
1591  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1592  ff_reduce_index(s, st->index);
1593  av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1594  }
1595 
1596  if (is_relative(pkt->dts))
1597  pkt->dts -= RELATIVE_TS_BASE;
1598  if (is_relative(pkt->pts))
1599  pkt->pts -= RELATIVE_TS_BASE;
1600 
1601  return ret;
1602 }
1603 
1604 /* XXX: suppress the packet queue */
 /* Drop all queued packets (no-op when the internal context is absent).
  * NOTE(review): original lines 1609-1611 and 1613 — presumably the
  * free_packet_buffer() calls on the parse/packet/raw queues — are elided
  * in this excerpt. */
1605 static void flush_packet_queue(AVFormatContext *s)
1606 {
1607  if (!s->internal)
1608  return;
1612 
1614 }
1615 
1616 /*******************************************************/
1617 /* seek support */
1618 
 /* Pick the stream that should drive default operations (e.g. seeking):
  * score each stream and return the index of the best one, or -1 when the
  * context has no streams. Video scores above audio; discarded streams and
  * streams without decoded frames rank lower.
  *
  * NOTE(review): original line 1632 (between the VIDEO check and the
  * "score -= 400" penalty) is elided in this excerpt, so the condition
  * that penalty applies to is not visible here. */
1619 int av_find_default_stream_index(AVFormatContext *s)
1620 {
1621  int i;
1622  AVStream *st;
1623  int best_stream = 0;
1624  int best_score = INT_MIN;
1625 
1626  if (s->nb_streams <= 0)
1627  return -1;
1628  for (i = 0; i < s->nb_streams; i++) {
1629  int score = 0;
1630  st = s->streams[i];
1631  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1633  score -= 400;
1634  if (st->codec->width && st->codec->height)
1635  score += 50;
1636  score+= 25;
1637  }
1638  if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1639  if (st->codec->sample_rate)
1640  score += 50;
1641  }
 /* Streams we actually decoded frames from are more trustworthy. */
1642  if (st->codec_info_nb_frames)
1643  score += 12;
1644 
1645  if (st->discard != AVDISCARD_ALL)
1646  score += 200;
1647 
1648  if (score > best_score) {
1649  best_score = score;
1650  best_stream = i;
1651  }
1652  }
1653  return best_stream;
1654 }
1655 
1656 /** Flush the frame reader. */
 /* Discard all queued packets and reset every stream's parsing/timestamp
  * state, typically after a seek.
  * NOTE(review): original lines 1672-1673, 1680 and 1685 are elided in
  * this excerpt; comments describe only the visible code. */
1657 void ff_read_frame_flush(AVFormatContext *s)
1658 {
1659  AVStream *st;
1660  int i, j;
1661 
1662  flush_packet_queue(s);
1663 
1664  /* Reset read state for each stream. */
1665  for (i = 0; i < s->nb_streams; i++) {
1666  st = s->streams[i];
1667 
1668  if (st->parser) {
1669  av_parser_close(st->parser);
1670  st->parser = NULL;
1671  }
1674  if (st->first_dts == AV_NOPTS_VALUE)
1675  st->cur_dts = RELATIVE_TS_BASE;
1676  else
1677  /* We set the current DTS to an unspecified origin. */
1678  st->cur_dts = AV_NOPTS_VALUE;
1679 
1681 
 /* Invalidate the pts reorder buffer. */
1682  for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1683  st->pts_buffer[j] = AV_NOPTS_VALUE;
1684 
 /* Re-arm global side-data injection for the next packet read. */
1686  st->inject_global_side_data = 1;
1687 
1688  st->skip_samples = 0;
1689  }
1690 }
1691 
1692 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1693 {
1694  int i;
1695 
1696  for (i = 0; i < s->nb_streams; i++) {
1697  AVStream *st = s->streams[i];
1698 
1699  st->cur_dts =
1700  av_rescale(timestamp,
1701  st->time_base.den * (int64_t) ref_st->time_base.num,
1702  st->time_base.num * (int64_t) ref_st->time_base.den);
1703  }
1704 }
1705 
1706 void ff_reduce_index(AVFormatContext *s, int stream_index)
1707 {
1708  AVStream *st = s->streams[stream_index];
1709  unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1710 
1711  if ((unsigned) st->nb_index_entries >= max_entries) {
1712  int i;
1713  for (i = 0; 2 * i < st->nb_index_entries; i++)
1714  st->index_entries[i] = st->index_entries[2 * i];
1715  st->nb_index_entries = i;
1716  }
1717 }
1718 
 /* Insert (or update in place) an entry in a sorted seek index.
  * Returns the entry's index on success, a negative value on failure.
  * The code is order-sensitive: the realloc must happen before the search
  * so `entries` and `*index_entries` stay in sync. */
1719 int ff_add_index_entry(AVIndexEntry **index_entries,
1720  int *nb_index_entries,
1721  unsigned int *index_entries_allocated_size,
1722  int64_t pos, int64_t timestamp,
1723  int size, int distance, int flags)
1724 {
1725  AVIndexEntry *entries, *ie;
1726  int index;
1727 
 /* Refuse growth that would overflow the byte-size computation below. */
1728  if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1729  return -1;
1730 
1731  if (timestamp == AV_NOPTS_VALUE)
1732  return AVERROR(EINVAL);
1733 
1734  if (size < 0 || size > 0x3FFFFFFF)
1735  return AVERROR(EINVAL);
1736 
1737  if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1738  timestamp -= RELATIVE_TS_BASE;
1739 
 /* Grow the array (amortized) to hold one more entry. */
1740  entries = av_fast_realloc(*index_entries,
1741  index_entries_allocated_size,
1742  (*nb_index_entries + 1) *
1743  sizeof(AVIndexEntry))
1744  if (!entries)
1745  return -1;
1746 
1747  *index_entries = entries;
1748 
 /* Locate the slot: ANY disables the keyframe-only filtering. */
1749  index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1750  timestamp, AVSEEK_FLAG_ANY);
1751 
1752  if (index < 0) {
 /* Timestamp precedes everything (or index empty): append at the end
  * only when ordering is preserved. */
1753  index = (*nb_index_entries)++;
1754  ie = &entries[index];
1755  av_assert0(index == 0 || ie[-1].timestamp < timestamp);
1756  } else {
1757  ie = &entries[index];
1758  if (ie->timestamp != timestamp) {
1759  if (ie->timestamp <= timestamp)
1760  return -1;
 /* Shift the tail up one slot to open a gap at `index`. */
1761  memmove(entries + index + 1, entries + index,
1762  sizeof(AVIndexEntry) * (*nb_index_entries - index));
1763  (*nb_index_entries)++;
1764  } else if (ie->pos == pos && distance < ie->min_distance)
1765  // do not reduce the distance
1766  distance = ie->min_distance;
1767  }
1768 
1769  ie->pos = pos;
1770  ie->timestamp = timestamp;
1771  ie->min_distance = distance;
1772  ie->size = size;
1773  ie->flags = flags;
1774 
1775  return index;
1776 }
1777 
 /* Public wrapper: wrap-correct the timestamp, then delegate to
  * ff_add_index_entry() on this stream's index arrays.
  * NOTE(review): original line 1782 (the ff_add_index_entry() call head)
  * is elided in this excerpt. */
1778 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1779  int size, int distance, int flags)
1780 {
1781  timestamp = wrap_timestamp(st, timestamp);
1783  &st->index_entries_allocated_size, pos,
1784  timestamp, size, distance, flags);
1785 }
1786 
1787 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1788  int64_t wanted_timestamp, int flags)
1789 {
1790  int a, b, m;
1791  int64_t timestamp;
1792 
1793  a = -1;
1794  b = nb_entries;
1795 
1796  // Optimize appending index entries at the end.
1797  if (b && entries[b - 1].timestamp < wanted_timestamp)
1798  a = b - 1;
1799 
1800  while (b - a > 1) {
1801  m = (a + b) >> 1;
1802  timestamp = entries[m].timestamp;
1803  if (timestamp >= wanted_timestamp)
1804  b = m;
1805  if (timestamp <= wanted_timestamp)
1806  a = m;
1807  }
1808  m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1809 
1810  if (!(flags & AVSEEK_FLAG_ANY))
1811  while (m >= 0 && m < nb_entries &&
1812  !(entries[m].flags & AVINDEX_KEYFRAME))
1813  m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1814 
1815  if (m == nb_entries)
1816  return -1;
1817  return m;
1818 }
1819 
 /* Size the I/O buffer from the seek index: measure the largest backward
  * byte distance between entries of different streams that are within
  * time_tolerance of each other, and enlarge the avio buffer so interleaved
  * reading does not need short backward seeks. Local protocols (file, pipe,
  * cache) are skipped — seeking there is cheap.
  * NOTE(review): original line 1873 (inside the final `if (skip ...)` block)
  * is elided in this excerpt. */
1820 void ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance)
1821 {
1822  int ist1, ist2;
1823  int64_t pos_delta = 0;
1824  int64_t skip = 0;
1825  //We could use URLProtocol flags here but as many user applications do not use URLProtocols this would be unreliable
1826  const char *proto = avio_find_protocol_name(s->filename);
1827 
1828  if (!proto) {
1829  av_log(s, AV_LOG_INFO,
1830  "Protocol name not provided, cannot determine if input is local or "
1831  "a network protocol, buffers and access patterns cannot be configured "
1832  "optimally without knowing the protocol\n");
1833  }
1834 
1835  if (proto && !(strcmp(proto, "file") && strcmp(proto, "pipe") && strcmp(proto, "cache")))
1836  return;
1837 
 /* For every ordered stream pair, find the worst backward jump between
  * nearly-simultaneous index entries. */
1838  for (ist1 = 0; ist1 < s->nb_streams; ist1++) {
1839  AVStream *st1 = s->streams[ist1];
1840  for (ist2 = 0; ist2 < s->nb_streams; ist2++) {
1841  AVStream *st2 = s->streams[ist2];
1842  int i1, i2;
1843 
1844  if (ist1 == ist2)
1845  continue;
1846 
1847  for (i1 = i2 = 0; i1 < st1->nb_index_entries; i1++) {
1848  AVIndexEntry *e1 = &st1->index_entries[i1];
1849  int64_t e1_pts = av_rescale_q(e1->timestamp, st1->time_base, AV_TIME_BASE_Q);
1850 
1851  skip = FFMAX(skip, e1->size);
1852  for (; i2 < st2->nb_index_entries; i2++) {
1853  AVIndexEntry *e2 = &st2->index_entries[i2];
1854  int64_t e2_pts = av_rescale_q(e2->timestamp, st2->time_base, AV_TIME_BASE_Q);
1855  if (e2_pts - e1_pts < time_tolerance)
1856  continue;
1857  pos_delta = FFMAX(pos_delta, e1->pos - e2->pos);
1858  break;
1859  }
1860  }
1861  }
1862  }
1863 
1864  pos_delta *= 2;
1865  /* XXX This could be adjusted depending on protocol*/
1866  if (s->pb->buffer_size < pos_delta && pos_delta < (1<<24)) {
1867  av_log(s, AV_LOG_VERBOSE, "Reconfiguring buffers to size %"PRId64"\n", pos_delta);
1868  ffio_set_buf_size(s->pb, pos_delta);
1869  s->pb->short_seek_threshold = FFMAX(s->pb->short_seek_threshold, pos_delta/2);
1870  }
1871 
1872  if (skip < (1<<23)) {
1874  }
1875 }
1876 
 /* Public wrapper around ff_index_search_timestamp() for this stream's
  * index entries.
  * NOTE(review): original line 1879 (the ff_index_search_timestamp() call
  * head with the entries/count arguments) is elided in this excerpt. */
1877 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1878 {
1880  wanted_timestamp, flags);
1881 }
1882 
1883 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1884  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1885 {
1886  int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1887  if (stream_index >= 0)
1888  ts = wrap_timestamp(s->streams[stream_index], ts);
1889  return ts;
1890 }
1891 
 /* Seek to target_ts using binary search over demuxer timestamps.
  * Uses cached index entries (when present) to seed the search bounds,
  * runs ff_gen_search() to find the byte position, then seeks there.
  * NOTE(review): original line 1954 (just before ff_update_cur_dts) is
  * elided in this excerpt. */
1892 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1893  int64_t target_ts, int flags)
1894 {
1895  AVInputFormat *avif = s->iformat;
1896  int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1897  int64_t ts_min, ts_max, ts;
1898  int index;
1899  int64_t ret;
1900  AVStream *st;
1901 
1902  if (stream_index < 0)
1903  return -1;
1904 
1905  av_log(s, AV_LOG_TRACE, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1906 
1907  ts_max =
1908  ts_min = AV_NOPTS_VALUE;
1909  pos_limit = -1; // GCC falsely says it may be uninitialized.
1910 
1911  st = s->streams[stream_index];
1912  if (st->index_entries) {
1913  AVIndexEntry *e;
1914 
1915  /* FIXME: Whole function must be checked for non-keyframe entries in
1916  * index case, especially read_timestamp(). */
 /* Lower bound: the index entry at or before the target. */
1917  index = av_index_search_timestamp(st, target_ts,
1918  flags | AVSEEK_FLAG_BACKWARD);
1919  index = FFMAX(index, 0);
1920  e = &st->index_entries[index];
1921 
1922  if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1923  pos_min = e->pos;
1924  ts_min = e->timestamp;
1925  av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1926  pos_min, av_ts2str(ts_min));
1927  } else {
1928  av_assert1(index == 0);
1929  }
1930 
 /* Upper bound: the index entry at or after the target. */
1931  index = av_index_search_timestamp(st, target_ts,
1932  flags & ~AVSEEK_FLAG_BACKWARD);
1933  av_assert0(index < st->nb_index_entries);
1934  if (index >= 0) {
1935  e = &st->index_entries[index];
1936  av_assert1(e->timestamp >= target_ts);
1937  pos_max = e->pos;
1938  ts_max = e->timestamp;
1939  pos_limit = pos_max - e->min_distance;
1940  av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1941  " dts_max=%s\n", pos_max, pos_limit, av_ts2str(ts_max));
1942  }
1943  }
1944 
1945  pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1946  ts_min, ts_max, flags, &ts, avif->read_timestamp);
1947  if (pos < 0)
1948  return -1;
1949 
1950  /* do the seek */
1951  if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1952  return ret;
1953 
1955  ff_update_cur_dts(s, st, ts);
1956 
1957  return 0;
1958 }
1959 
1960 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1961  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1962 {
1963  int64_t step = 1024;
1964  int64_t limit, ts_max;
1965  int64_t filesize = avio_size(s->pb);
1966  int64_t pos_max = filesize - 1;
1967  do {
1968  limit = pos_max;
1969  pos_max = FFMAX(0, (pos_max) - step);
1970  ts_max = ff_read_timestamp(s, stream_index,
1971  &pos_max, limit, read_timestamp);
1972  step += step;
1973  } while (ts_max == AV_NOPTS_VALUE && 2*limit > step);
1974  if (ts_max == AV_NOPTS_VALUE)
1975  return -1;
1976 
1977  for (;;) {
1978  int64_t tmp_pos = pos_max + 1;
1979  int64_t tmp_ts = ff_read_timestamp(s, stream_index,
1980  &tmp_pos, INT64_MAX, read_timestamp);
1981  if (tmp_ts == AV_NOPTS_VALUE)
1982  break;
1983  av_assert0(tmp_pos > pos_max);
1984  ts_max = tmp_ts;
1985  pos_max = tmp_pos;
1986  if (tmp_pos >= filesize)
1987  break;
1988  }
1989 
1990  if (ts)
1991  *ts = ts_max;
1992  if (pos)
1993  *pos = pos_max;
1994 
1995  return 0;
1996 }
1997 
 /* Generic timestamp-based byte-position search.
  * Narrows [pos_min, pos_max] until pos_min reaches pos_limit, choosing the
  * next probe by interpolation first, then bisection, then linear scan when
  * the bounds stop moving (no_change tracks this escalation). Returns the
  * byte position to seek to (negative on failure); *ts_ret receives the
  * timestamp found at that position. */
1998 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1999  int64_t pos_min, int64_t pos_max, int64_t pos_limit,
2000  int64_t ts_min, int64_t ts_max,
2001  int flags, int64_t *ts_ret,
2002  int64_t (*read_timestamp)(struct AVFormatContext *, int,
2003  int64_t *, int64_t))
2004 {
2005  int64_t pos, ts;
2006  int64_t start_pos;
2007  int no_change;
2008  int ret;
2009 
2010  av_log(s, AV_LOG_TRACE, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
2011 
 /* Establish the lower bound from the start of the data when the caller
  * could not provide one. */
2012  if (ts_min == AV_NOPTS_VALUE) {
2013  pos_min = s->internal->data_offset;
2014  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2015  if (ts_min == AV_NOPTS_VALUE)
2016  return -1;
2017  }
2018 
2019  if (ts_min >= target_ts) {
2020  *ts_ret = ts_min;
2021  return pos_min;
2022  }
2023 
 /* Likewise establish the upper bound from the end of the file. */
2024  if (ts_max == AV_NOPTS_VALUE) {
2025  if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
2026  return ret;
2027  pos_limit = pos_max;
2028  }
2029 
2030  if (ts_max <= target_ts) {
2031  *ts_ret = ts_max;
2032  return pos_max;
2033  }
2034 
2035  av_assert0(ts_min < ts_max);
2036 
2037  no_change = 0;
2038  while (pos_min < pos_limit) {
2039  av_log(s, AV_LOG_TRACE,
2040  "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
2041  pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
2042  av_assert0(pos_limit <= pos_max);
2043 
2044  if (no_change == 0) {
2045  int64_t approximate_keyframe_distance = pos_max - pos_limit;
2046  // interpolate position (better than dichotomy)
2047  pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
2048  ts_max - ts_min) +
2049  pos_min - approximate_keyframe_distance;
2050  } else if (no_change == 1) {
2051  // bisection if interpolation did not change min / max pos last time
2052  pos = (pos_min + pos_limit) >> 1;
2053  } else {
2054  /* linear search if bisection failed, can only happen if there
2055  * are very few or no keyframes between min/max */
2056  pos = pos_min;
2057  }
 /* Clamp the probe strictly inside the current interval so the loop
  * always makes progress. */
2058  if (pos <= pos_min)
2059  pos = pos_min + 1;
2060  else if (pos > pos_limit)
2061  pos = pos_limit;
2062  start_pos = pos;
2063 
2064  // May pass pos_limit instead of -1.
2065  ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp);
2066  if (pos == pos_max)
2067  no_change++;
2068  else
2069  no_change = 0;
2070  av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s"
2071  " target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
2072  pos_min, pos, pos_max,
2073  av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
2074  pos_limit, start_pos, no_change);
2075  if (ts == AV_NOPTS_VALUE) {
2076  av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
2077  return -1;
2078  }
 /* Shrink whichever bound the probe result allows (both on an exact
  * hit, which terminates the loop). */
2079  if (target_ts <= ts) {
2080  pos_limit = start_pos - 1;
2081  pos_max = pos;
2082  ts_max = ts;
2083  }
2084  if (target_ts >= ts) {
2085  pos_min = pos;
2086  ts_min = ts;
2087  }
2088  }
2089 
2090  pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
2091  ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
2092 #if 0
2093  pos_min = pos;
2094  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2095  pos_min++;
2096  ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2097  av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %s<=%s<=%s\n",
2098  pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
2099 #endif
2100  *ts_ret = ts;
2101  return pos;
2102 }
2103 
2104 static int seek_frame_byte(AVFormatContext *s, int stream_index,
2105  int64_t pos, int flags)
2106 {
2107  int64_t pos_min, pos_max;
2108 
2109  pos_min = s->internal->data_offset;
2110  pos_max = avio_size(s->pb) - 1;
2111 
2112  if (pos < pos_min)
2113  pos = pos_min;
2114  else if (pos > pos_max)
2115  pos = pos_max;
2116 
2117  avio_seek(s->pb, pos, SEEK_SET);
2118 
2119  s->io_repositioned = 1;
2120 
2121  return 0;
2122 }
2123 
 /* Index-based seek fallback: look up the timestamp in the stream index;
  * when the target lies beyond the last indexed entry, read packets forward
  * (building index entries as a side effect) until a keyframe past the
  * target is found, then retry the lookup and seek to the entry's position.
  * NOTE(review): original lines 2145 and 2179 are elided in this excerpt;
  * comments describe only the visible code. */
2124 static int seek_frame_generic(AVFormatContext *s, int stream_index,
2125  int64_t timestamp, int flags)
2126 {
2127  int index;
2128  int64_t ret;
2129  AVStream *st;
2130  AVIndexEntry *ie;
2131 
2132  st = s->streams[stream_index];
2133 
2134  index = av_index_search_timestamp(st, timestamp, flags);
2135 
 /* Target precedes everything we have indexed: give up. */
2136  if (index < 0 && st->nb_index_entries &&
2137  timestamp < st->index_entries[0].timestamp)
2138  return -1;
2139 
2140  if (index < 0 || index == st->nb_index_entries - 1) {
2141  AVPacket pkt;
2142  int nonkey = 0;
2143 
 /* Resume reading from the last indexed position (or the start of
  * the data when there is no index yet). */
2144  if (st->nb_index_entries) {
2146  ie = &st->index_entries[st->nb_index_entries - 1];
2147  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2148  return ret;
2149  ff_update_cur_dts(s, st, ie->timestamp);
2150  } else {
2151  if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
2152  return ret;
2153  }
2154  for (;;) {
2155  int read_status;
2156  do {
2157  read_status = av_read_frame(s, &pkt);
2158  } while (read_status == AVERROR(EAGAIN));
2159  if (read_status < 0)
2160  break;
2161  if (stream_index == pkt.stream_index && pkt.dts > timestamp) {
2162  if (pkt.flags & AV_PKT_FLAG_KEY) {
2163  av_packet_unref(&pkt);
2164  break;
2165  }
 /* Guard against streams without keyframes past the target. */
2166  if (nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS) {
2167  av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
2168  av_packet_unref(&pkt);
2169  break;
2170  }
2171  }
2172  av_packet_unref(&pkt);
2173  }
2174  index = av_index_search_timestamp(st, timestamp, flags);
2175  }
2176  if (index < 0)
2177  return -1;
2178 
 /* Prefer the demuxer's own read_seek when it can satisfy the request. */
2180  if (s->iformat->read_seek)
2181  if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2182  return 0;
2183  ie = &st->index_entries[index];
2184  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2185  return ret;
2186  ff_update_cur_dts(s, st, ie->timestamp);
2187 
2188  return 0;
2189 }
2190 
/**
 * Dispatch a seek request to the most capable available mechanism, in
 * order: byte seek, the demuxer's read_seek callback, binary search via
 * read_timestamp, and finally the generic index-based seek.
 *
 * @return >= 0 on success, a negative value on failure
 */
static int seek_frame_internal(AVFormatContext *s, int stream_index,
                               int64_t timestamp, int flags)
{
    int ret;
    AVStream *st;

    if (flags & AVSEEK_FLAG_BYTE) {
        /* Byte seeking is only allowed if the demuxer does not forbid it. */
        if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
            return -1;
        return seek_frame_byte(s, stream_index, timestamp, flags);
    }

    if (stream_index < 0) {
        /* No stream given: pick a default and convert the timestamp
         * into that stream's time base. */
        stream_index = av_find_default_stream_index(s);
        if (stream_index < 0)
            return -1;

        st = s->streams[stream_index];
        /* timestamp for default must be expressed in AV_TIME_BASE units */
        timestamp = av_rescale(timestamp, st->time_base.den,
                               AV_TIME_BASE * (int64_t) st->time_base.num);
    }

    /* first, we try the format specific seek */
    if (s->iformat->read_seek) {
        ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    } else
        ret = -1;
    if (ret >= 0)
        return 0;

    if (s->iformat->read_timestamp &&
        !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
        /* Timestamps are readable at arbitrary offsets: binary search. */
        return ff_seek_frame_binary(s, stream_index, timestamp, flags);
    } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
        /* Last resort: index/linear generic seek. */
        return seek_frame_generic(s, stream_index, timestamp, flags);
    } else
        return -1;
}
2234 
/**
 * Public API: seek to the keyframe nearest to timestamp.
 *
 * Demuxers that only implement the newer read_seek2 callback are driven
 * through avformat_seek_file() with a one-sided interval derived from
 * AVSEEK_FLAG_BACKWARD; everything else goes through seek_frame_internal().
 */
int av_seek_frame(AVFormatContext *s, int stream_index,
                  int64_t timestamp, int flags)
{
    int ret;

    if (s->iformat->read_seek2 && !s->iformat->read_seek) {
        int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
        /* Map the legacy BACKWARD flag onto a half-open [min,max] range. */
        if ((flags & AVSEEK_FLAG_BACKWARD))
            max_ts = timestamp;
        else
            min_ts = timestamp;
        return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
                                  flags & ~AVSEEK_FLAG_BACKWARD);
    }

    ret = seek_frame_internal(s, stream_index, timestamp, flags);

    if (ret >= 0)
        /* NOTE(review): the statement body of this 'if' appears to be
         * missing in this copy (upstream re-queues attached pictures on
         * success here); as written, 'return ret' binds to the 'if' and
         * the failure path falls off the end. Confirm against upstream. */

    return ret;
}
2257 
/**
 * Public API: seek to timestamp ts constrained to [min_ts, max_ts].
 *
 * Prefers the demuxer's read_seek2 callback; otherwise emulates interval
 * seeking on top of the old av_seek_frame() API (with weaker semantics).
 *
 * @return >= 0 on success, a negative error code on failure
 */
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
                       int64_t ts, int64_t max_ts, int flags)
{
    /* The target must lie inside the caller-provided interval. */
    if (min_ts > ts || max_ts < ts)
        return -1;
    if (stream_index < -1 || stream_index >= (int)s->nb_streams)
        return AVERROR(EINVAL);

    if (s->seek2any>0)
        flags |= AVSEEK_FLAG_ANY;
    flags &= ~AVSEEK_FLAG_BACKWARD;

    if (s->iformat->read_seek2) {
        int ret;

        if (stream_index == -1 && s->nb_streams == 1) {
            /* Convert AV_TIME_BASE timestamps into the single stream's
             * own time base before calling the demuxer.
             * NOTE(review): the rounding-mode arguments (and closing
             * parentheses) of both av_rescale_rnd() calls appear elided
             * in this copy — confirm against upstream. */
            AVRational time_base = s->streams[0]->time_base;
            ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
            min_ts = av_rescale_rnd(min_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
            max_ts = av_rescale_rnd(max_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
        }

        ret = s->iformat->read_seek2(s, stream_index, min_ts,
                                     ts, max_ts, flags);

        if (ret >= 0)
        return ret;
    }

    if (s->iformat->read_timestamp) {
        // try to seek via read_timestamp()
    }

    // Fall back on old API if new is not implemented but old is.
    // Note the old API has somewhat different semantics.
    if (s->iformat->read_seek || 1) {
        /* Pick the direction whose interval edge is farther from ts,
         * then retry from the opposite edge if the first attempt fails. */
        int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
        int ret = av_seek_frame(s, stream_index, ts, flags | dir);
        if (ret<0 && ts != min_ts && max_ts != ts) {
            ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
            if (ret >= 0)
                ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
        }
        return ret;
    }

    // try some generic seek like seek_frame_generic() but with new ts semantics
    return -1; //unreachable
}
2313 
/**
 * Public API: discard internal demuxer buffering state.
 */
int avformat_flush(AVFormatContext *s)
{
    /* NOTE(review): the body appears truncated in this copy — upstream
     * flushes the packet queue (ff_read_frame_flush) here. Confirm. */
    return 0;
}
2319 
2320 /*******************************************************/
2321 
/**
 * Return TRUE if the context has an accurate duration for at least one
 * of its streams (or for the container itself).
 *
 * @return TRUE if at least one component has an accurate duration.
 */
2327 static int has_duration(AVFormatContext *ic)
2328 {
2329  int i;
2330  AVStream *st;
2331 
2332  for (i = 0; i < ic->nb_streams; i++) {
2333  st = ic->streams[i];
2334  if (st->duration != AV_NOPTS_VALUE)
2335  return 1;
2336  }
2337  if (ic->duration != AV_NOPTS_VALUE)
2338  return 1;
2339  return 0;
2340 }
2341 
/**
 * Estimate the global stream timings from those of each component stream.
 *
 * Also computes the global bitrate if possible.
 */
/**
 * Derive the container's start_time, duration and bit_rate from the
 * per-stream values, and propagate start/end times into programs.
 *
 * NOTE(review): several lines of this function appear elided in this copy
 * (a codec_type condition guarding the text-stream branch, and the
 * trailing arguments of the av_rescale_q_rnd() call), leaving the brace
 * structure unbalanced. Confirm against upstream before building.
 */
static void update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, start_time_text, end_time, end_time1;
    int64_t duration, duration1, filesize;
    int i;
    AVStream *st;
    AVProgram *p;

    start_time = INT64_MAX;
    start_time_text = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    for (i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            start_time1 = av_rescale_q(st->start_time, st->time_base,
                                       AV_TIME_BASE_Q);
                /* Track text-like streams separately so a subtitle that
                 * starts early does not dominate the A/V start time. */
                if (start_time1 < start_time_text)
                    start_time_text = start_time1;
            } else
                start_time = FFMIN(start_time, start_time1);
            end_time1 = av_rescale_q_rnd(st->duration, st->time_base,
            if (end_time1 != AV_NOPTS_VALUE) {
                end_time1 += start_time1;
                end_time = FFMAX(end_time, end_time1);
            }
            /* Widen the enclosing programs to cover this stream. */
            for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) {
                if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
                    p->start_time = start_time1;
                if (p->end_time < end_time1)
                    p->end_time = end_time1;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base,
                                     AV_TIME_BASE_Q);
            duration = FFMAX(duration, duration1);
        }
    }
    /* Accept the text start time only when it is close to (or the only
     * candidate for) the primary start time; otherwise just log it. */
    if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
        start_time = start_time_text;
    else if (start_time > start_time_text)
        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);

    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN) {
            if (ic->nb_programs) {
                /* With programs, the duration is the longest program span. */
                for (i = 0; i < ic->nb_programs; i++) {
                    p = ic->programs[i];
                    if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
                        duration = FFMAX(duration, p->end_time - p->start_time);
                }
            } else
                duration = FFMAX(duration, end_time - start_time);
        }
    }
    if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
        ic->duration = duration;
    }
    if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
        /* compute the bitrate */
        double bitrate = (double) filesize * 8.0 * AV_TIME_BASE /
                         (double) ic->duration;
        if (bitrate >= 0 && bitrate <= INT64_MAX)
            ic->bit_rate = bitrate;
    }
}
2418 
/**
 * Copy the container-level start_time/duration into every stream that has
 * no start time of its own.
 *
 * NOTE(review): the assignment statements (and a leading call to
 * update_stream_timings()) appear elided in this copy — only the trailing
 * 'st->time_base);' argument lines remain. Confirm against upstream.
 */
static void fill_all_stream_timings(AVFormatContext *ic)
{
    int i;
    AVStream *st;

    for (i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE) {
            if (ic->start_time != AV_NOPTS_VALUE)
                    st->time_base);
            if (ic->duration != AV_NOPTS_VALUE)
                    st->time_base);
        }
    }
}
2437 
2438 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2439 {
2440  int64_t filesize, duration;
2441  int i, show_warning = 0;
2442  AVStream *st;
2443 
2444  /* if bit_rate is already set, we believe it */
2445  if (ic->bit_rate <= 0) {
2446  int bit_rate = 0;
2447  for (i = 0; i < ic->nb_streams; i++) {
2448  st = ic->streams[i];
2449  if (st->codec->bit_rate > 0) {
2450  if (INT_MAX - st->codec->bit_rate < bit_rate) {
2451  bit_rate = 0;
2452  break;
2453  }
2454  bit_rate += st->codec->bit_rate;
2455  } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->codec_info_nb_frames > 1) {
2456  // If we have a videostream with packets but without a bitrate
2457  // then consider the sum not known
2458  bit_rate = 0;
2459  break;
2460  }
2461  }
2462  ic->bit_rate = bit_rate;
2463  }
2464 
2465  /* if duration is already set, we believe it */
2466  if (ic->duration == AV_NOPTS_VALUE &&
2467  ic->bit_rate != 0) {
2468  filesize = ic->pb ? avio_size(ic->pb) : 0;
2469  if (filesize > ic->internal->data_offset) {
2470  filesize -= ic->internal->data_offset;
2471  for (i = 0; i < ic->nb_streams; i++) {
2472  st = ic->streams[i];
2473  if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2474  && st->duration == AV_NOPTS_VALUE) {
2475  duration = av_rescale(8 * filesize, st->time_base.den,
2476  ic->bit_rate *
2477  (int64_t) st->time_base.num);
2478  st->duration = duration;
2479  show_warning = 1;
2480  }
2481  }
2482  }
2483  }
2484  if (show_warning)
2485  av_log(ic, AV_LOG_WARNING,
2486  "Estimating duration from bitrate, this may be inaccurate\n");
2487 }
2488 
2489 #define DURATION_MAX_READ_SIZE 250000LL
2490 #define DURATION_MAX_RETRY 6
2491 
2492 /* only usable for MPEG-PS streams */
/**
 * Estimate stream durations by demuxing packets near the end of the file
 * and reading their PTS (only usable for MPEG-PS/TS style streams).
 *
 * Reads up to DURATION_MAX_READ_SIZE bytes from a window at the file end,
 * doubling the window up to DURATION_MAX_RETRY times until every
 * audio/video stream has a duration. Restores the read position at exit.
 *
 * NOTE(review): a few lines appear elided in this copy (the tail of the
 * "start time not set" warning condition, and the per-retry
 * rfps/last_IP_pts reset statements near the end). Confirm upstream.
 */
static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int num, den, read_size, i, ret;
    int found_duration = 0;
    int is_end;
    int64_t filesize, offset, duration;
    int retry = 0;

    /* flush packet queue */
    flush_packet_queue(ic);

    for (i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE &&
            st->first_dts == AV_NOPTS_VALUE &&
               "start time for stream %d is not set in estimate_timings_from_pts\n", i);

        /* Parsers would resynthesize timestamps; drop them while probing. */
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
        }
    }

    av_opt_set(ic, "skip_changes", "1", AV_OPT_SEARCH_CHILDREN);
    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->pb ? avio_size(ic->pb) : 0;
    do {
        /* Stop after the first pass that already had a duration. */
        is_end = found_duration;
        offset = filesize - (DURATION_MAX_READ_SIZE << retry);
        if (offset < 0)
            offset = 0;

        avio_seek(ic->pb, offset, SEEK_SET);
        read_size = 0;
        for (;;) {
            /* Bound the amount of data inspected per retry. */
            if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
                break;

            do {
                ret = ff_read_packet(ic, pkt);
            } while (ret == AVERROR(EAGAIN));
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE &&
                (st->start_time != AV_NOPTS_VALUE ||
                 st->first_dts != AV_NOPTS_VALUE)) {
                if (pkt->duration == 0) {
                    /* Synthesize a packet duration from the frame rate. */
                    ff_compute_frame_duration(ic, &num, &den, st, st->parser, pkt);
                    if (den && num) {
                        pkt->duration = av_rescale_rnd(1,
                                           num * (int64_t) st->time_base.den,
                                           den * (int64_t) st->time_base.num,
                                           AV_ROUND_DOWN);
                    }
                }
                duration = pkt->pts + pkt->duration;
                found_duration = 1;
                if (st->start_time != AV_NOPTS_VALUE)
                    duration -= st->start_time;
                else
                    duration -= st->first_dts;
                if (duration > 0) {
                    /* Only grow the duration, and reject jumps far away
                     * from the last seen value (> ~60s) as outliers. */
                    if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 ||
                        (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
                        st->duration = duration;
                    st->info->last_duration = duration;
                }
            }
            av_packet_unref(pkt);
        }

        /* check if all audio/video streams have valid duration */
        if (!is_end) {
            is_end = 1;
            for (i = 0; i < ic->nb_streams; i++) {
                st = ic->streams[i];
                switch (st->codec->codec_type) {
                    case AVMEDIA_TYPE_VIDEO:
                    case AVMEDIA_TYPE_AUDIO:
                        if (st->duration == AV_NOPTS_VALUE)
                            is_end = 0;
                }
            }
        }
    } while (!is_end &&
             offset &&
             ++retry <= DURATION_MAX_RETRY);

    av_opt_set(ic, "skip_changes", "0", AV_OPT_SEARCH_CHILDREN);

    /* warn about audio/video streams which duration could not be estimated */
    for (i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration == AV_NOPTS_VALUE) {
            switch (st->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_AUDIO:
                if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
                    av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i);
                } else
                    av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i);
            }
        }
    }

    /* Restore the original read position and decode state. */
    avio_seek(ic->pb, old_offset, SEEK_SET);
    for (i = 0; i < ic->nb_streams; i++) {
        int j;

        st = ic->streams[i];
        st->cur_dts = st->first_dts;
        for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
            st->pts_buffer[j] = AV_NOPTS_VALUE;
    }
}
2618 
/**
 * Choose and run the best timing-estimation strategy for this container:
 * accurate PTS probing for MPEG-PS/TS, per-stream timings when at least
 * one stream has them, and bitrate-based guessing as last resort.
 *
 * NOTE(review): the bodies of the has_duration() branch and the bitrate
 * branch appear elided in this copy (upstream calls
 * fill_all_stream_timings() / estimate_timings_from_bit_rate() there).
 * Confirm against upstream.
 */
static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        file_size = 0;
    } else {
        file_size = avio_size(ic->pb);
        file_size = FFMAX(0, file_size);
    }

    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && ic->pb->seekable) {
        /* get accurate estimate from the PTSes */
        estimate_timings_from_pts(ic, old_offset);
    } else if (has_duration(ic)) {
        /* at least one component has timings - we use them for all
         * the components */
    } else {
        /* less precise: use bitrate info */
    }

    {
        /* Trace the final per-stream and global timing decisions. */
        int i;
        AVStream av_unused *st;
        for (i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
                   (double) st->start_time / AV_TIME_BASE,
                   (double) st->duration / AV_TIME_BASE);
        }
        av_log(ic, AV_LOG_TRACE,
               "stream: start_time: %0.3f duration: %0.3f bitrate=%"PRId64" kb/s\n",
               (double) ic->start_time / AV_TIME_BASE,
               (double) ic->duration / AV_TIME_BASE,
               (int64_t)ic->bit_rate / 1000);
    }
}
2665 
/**
 * Check whether a stream's codec context carries all the parameters needed
 * to use it (per media type).
 *
 * @param errmsg_ptr if non-NULL, receives a static string naming the first
 *                   missing parameter
 * @return 1 when all required parameters are present, 0 otherwise
 */
static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
{
    AVCodecContext *avctx = st->codec;

/* Record the failure reason (if requested) and bail out with 0. */
#define FAIL(errmsg) do { \
        if (errmsg_ptr) \
            *errmsg_ptr = errmsg; \
        return 0; \
    } while (0)

    if (   avctx->codec_id == AV_CODEC_ID_NONE
        && avctx->codec_type != AVMEDIA_TYPE_DATA)
        FAIL("unknown codec");
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (!avctx->frame_size && determinable_frame_size(avctx))
            FAIL("unspecified frame size");
        if (st->info->found_decoder >= 0 &&
            avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
            FAIL("unspecified sample format");
        if (!avctx->sample_rate)
            FAIL("unspecified sample rate");
        if (!avctx->channels)
            FAIL("unspecified number of channels");
        if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
            FAIL("no decodable DTS frames");
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (!avctx->width)
            FAIL("unspecified size");
        if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
            FAIL("unspecified pixel format");
            /* NOTE(review): the RV30/RV40 + missing-SAR condition guarding
             * this FAIL appears elided in this copy; as written it would
             * execute unconditionally. Confirm against upstream. */
            FAIL("no frame in rv30/40 and no sar");
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
            FAIL("unspecified size");
        break;
    case AVMEDIA_TYPE_DATA:
        if (avctx->codec_id == AV_CODEC_ID_NONE) return 1;
    }

    return 1;
}
2712 
/* Returns 1 or 0 depending on whether decoded data was returned, or a negative error code. */
/**
 * Attempt to decode one packet of a stream in order to discover missing
 * codec parameters (sample format, pixel format, dimensions, ...).
 *
 * Opens the decoder on demand (single-threaded, so H.264 extracts SPS/PPS
 * into extradata) and caches the open/failure result in
 * st->info->found_decoder.
 *
 * NOTE(review): the end of the parameter list (the options dictionary and
 * frame allocation), the skip_frame save/override condition, and part of
 * the decode-loop condition appear elided in this copy. Confirm upstream.
 */
static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
{
    const AVCodec *codec;
    int got_picture = 1, ret = 0;
    AVSubtitle subtitle;
    AVPacket pkt = *avpkt;
    int do_skip_frame = 0;
    enum AVDiscard skip_frame;

    if (!frame)
        return AVERROR(ENOMEM);

    /* Open the decoder unless it is already open or a previous attempt
     * for this codec id already failed (found_decoder < 0 caches that). */
    if (!avcodec_is_open(st->codec) &&
        st->info->found_decoder <= 0 &&
        (st->codec->codec_id != -st->info->found_decoder || !st->codec->codec_id)) {
        AVDictionary *thread_opt = NULL;

        codec = find_decoder(s, st, st->codec->codec_id);

        if (!codec) {
            st->info->found_decoder = -st->codec->codec_id;
            ret = -1;
            goto fail;
        }

        /* Force thread count to 1 since the H.264 decoder will not extract
         * SPS and PPS to extradata during multi-threaded decoding. */
        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
        if (s->codec_whitelist)
            av_dict_set(options ? options : &thread_opt, "codec_whitelist", s->codec_whitelist, 0);
        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
        if (!options)
            av_dict_free(&thread_opt);
        if (ret < 0) {
            st->info->found_decoder = -st->codec->codec_id;
            goto fail;
        }
        st->info->found_decoder = 1;
    } else if (!st->info->found_decoder)
        st->info->found_decoder = 1;

    if (st->info->found_decoder < 0) {
        ret = -1;
        goto fail;
    }

        /* Remember the caller's skip_frame setting so it can be restored
         * in the fail path below. */
        do_skip_frame = 1;
        skip_frame = st->codec->skip_frame;
    }

    /* Feed the packet to the decoder until it is consumed or parameters
     * are known; got_picture keeps draining delayed frames on empty pkt. */
    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
           ret >= 0 &&
           (!st->codec_info_nb_frames &&
        got_picture = 0;
        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(st->codec, frame,
                                        &got_picture, &pkt);
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = avcodec_decode_subtitle2(st->codec, &subtitle,
                                           &got_picture, &pkt);
            /* Subtitles are always consumed whole. */
            ret = pkt.size;
            break;
        default:
            break;
        }
        if (ret >= 0) {
            if (got_picture)
                st->nb_decoded_frames++;
            /* Advance past the bytes the decoder consumed. */
            pkt.data += ret;
            pkt.size -= ret;
            ret = got_picture;
        }
    }

    if (!pkt.data && !got_picture)
        ret = -1;

fail:
    if (do_skip_frame) {
        st->codec->skip_frame = skip_frame;
    }

    av_frame_free(&frame);
    return ret;
}
2810 
2811 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2812 {
2813  while (tags->id != AV_CODEC_ID_NONE) {
2814  if (tags->id == id)
2815  return tags->tag;
2816  tags++;
2817  }
2818  return 0;
2819 }
2820 
2821 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2822 {
2823  int i;
2824  for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2825  if (tag == tags[i].tag)
2826  return tags[i].id;
2827  for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2828  if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2829  return tags[i].id;
2830  return AV_CODEC_ID_NONE;
2831 }
2832 
/**
 * Map raw PCM properties to a codec id.
 *
 * @param bps    bits per sample
 * @param flt    non-zero for floating-point samples
 * @param be     non-zero for big-endian samples
 * @param sflags bitmask of byte widths that are signed (bit bps/8 - 1)
 * @return the matching AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
 *
 * NOTE(review): the return statements for the multi-byte cases (and the
 * float 32/64-bit cases) appear elided in this copy — upstream selects the
 * BE/LE codec variant via 'be' there. Confirm against upstream.
 */
enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
{
    if (flt) {
        switch (bps) {
        case 32:
        case 64:
        default:
            return AV_CODEC_ID_NONE;
        }
    } else {
        /* Round bits up to whole bytes. */
        bps += 7;
        bps >>= 3;
        if (sflags & (1 << (bps - 1))) {
            /* Signed variants. */
            switch (bps) {
            case 1:
                return AV_CODEC_ID_PCM_S8;
            case 2:
            case 3:
            case 4:
            default:
                return AV_CODEC_ID_NONE;
            }
        } else {
            /* Unsigned variants. */
            switch (bps) {
            case 1:
                return AV_CODEC_ID_PCM_U8;
            case 2:
            case 3:
            case 4:
            default:
                return AV_CODEC_ID_NONE;
            }
        }
    }
}
2876 
2877 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2878 {
2879  unsigned int tag;
2880  if (!av_codec_get_tag2(tags, id, &tag))
2881  return 0;
2882  return tag;
2883 }
2884 
2885 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2886  unsigned int *tag)
2887 {
2888  int i;
2889  for (i = 0; tags && tags[i]; i++) {
2890  const AVCodecTag *codec_tags = tags[i];
2891  while (codec_tags->id != AV_CODEC_ID_NONE) {
2892  if (codec_tags->id == id) {
2893  *tag = codec_tags->tag;
2894  return 1;
2895  }
2896  codec_tags++;
2897  }
2898  }
2899  return 0;
2900 }
2901 
2902 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2903 {
2904  int i;
2905  for (i = 0; tags && tags[i]; i++) {
2906  enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2907  if (id != AV_CODEC_ID_NONE)
2908  return id;
2909  }
2910  return AV_CODEC_ID_NONE;
2911 }
2912 
2913 static void compute_chapters_end(AVFormatContext *s)
2914 {
2915  unsigned int i, j;
2916  int64_t max_time = 0;
2917 
2918  if (s->duration > 0)
2919  max_time = s->duration +
2920  ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2921 
2922  for (i = 0; i < s->nb_chapters; i++)
2923  if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2924  AVChapter *ch = s->chapters[i];
2925  int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2926  ch->time_base)
2927  : INT64_MAX;
2928 
2929  for (j = 0; j < s->nb_chapters; j++) {
2930  AVChapter *ch1 = s->chapters[j];
2931  int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2932  ch->time_base);
2933  if (j != i && next_start > ch->start && next_start < end)
2934  end = next_start;
2935  }
2936  ch->end = (end == INT64_MAX) ? ch->start : end;
2937  }
2938 }
2939 
/**
 * Map a timebase-probe index to a candidate frame rate, expressed in
 * 1/(12*1001) units so NTSC-style and exact rates share one integer scale:
 *   [0, 360)   -> 1/1.001 .. 360/1.001 fps in unit steps
 *   [360, 390) -> 31 .. 60 fps, NTSC-adjusted
 *   [390, 393) -> 80, 120, 240 fps, NTSC-adjusted
 *   [393, ...) -> 24, 30, 60, 12, 15, 48 fps exact
 */
static int get_std_framerate(int i)
{
    static const int ntsc_high[] = { 80, 120, 240 };
    static const int exact[]     = { 24, 30, 60, 12, 15, 48 };

    if (i < 360)
        return (i + 1) * 1001;
    if (i < 390)
        return (i - 360 + 31) * 1001 * 12;
    if (i < 393)
        return ntsc_high[i - 390] * 1001 * 12;
    return exact[i - 393] * 1000 * 12;
}
2957 
2958 /* Is the time base unreliable?
2959  * This is a heuristic to balance between quick acceptance of the values in
2960  * the headers vs. some extra checks.
2961  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2962  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2963  * And there are "variable" fps files this needs to detect as well. */
{
    /* Treat the timebase as unreliable when it is implausibly fine
     * (>= ~101 fps), implausibly coarse (< ~5 fps), or belongs to a codec
     * family known to misreport frame rates (see the comment above). */
    if (c->time_base.den >= 101LL * c->time_base.num ||
        c->time_base.den < 5LL * c->time_base.num ||
        // c->codec_tag == AV_RL32("DIVX") ||
        // c->codec_tag == AV_RL32("XVID") ||
        c->codec_tag == AV_RL32("mp4v") ||
        c->codec_id == AV_CODEC_ID_GIF ||
        c->codec_id == AV_CODEC_ID_HEVC ||
        c->codec_id == AV_CODEC_ID_H264)
        return 1;
    return 0;
}
2978 
{
    int ret;

    /* NOTE(review): the size-validation 'if (...) {' and the allocation
     * call appear elided in this copy; the orphaned statements below are
     * the invalid-size error path. Confirm against upstream. */
        avctx->extradata = NULL;
        avctx->extradata_size = 0;
        return AVERROR(EINVAL);
    }
    if (avctx->extradata) {
        /* Zero the padding so bitstream readers may safely overread. */
        memset(avctx->extradata + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        avctx->extradata_size = size;
        ret = 0;
    } else {
        avctx->extradata_size = 0;
        ret = AVERROR(ENOMEM);
    }
    return ret;
}
2999 
{
    /* Allocate padded extradata of the requested size, then fill it
     * from the byte stream. */
    int ret = ff_alloc_extradata(avctx, size);
    if (ret < 0)
        return ret;
    ret = avio_read(pb, avctx->extradata, size);
    if (ret != size) {
        /* Short read or I/O error: release the partial buffer. */
        av_freep(&avctx->extradata);
        avctx->extradata_size = 0;
        av_log(avctx, AV_LOG_ERROR, "Failed to read extradata of size %d\n", size);
        return ret < 0 ? ret : AVERROR_INVALIDDATA;
    }

    return ret;
}
3015 
/**
 * Feed one timestamp into the real-frame-rate estimator.
 *
 * For each standard candidate frame rate, accumulates the rounding error
 * between the observed DTS and that rate's ideal tick grid
 * (duration_error[j][0] = sum of errors, [j][1] = sum of squared errors,
 * for two half-tick phase hypotheses j). Candidates whose variance grows
 * too large are disabled by setting their error sum to 2e10.
 *
 * @param ts DTS of the current packet (may be AV_NOPTS_VALUE)
 * @return 0 on success, AVERROR(ENOMEM) if the error table allocation fails
 */
int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
{
    int i, j;
    int64_t last = st->info->last_dts;

    /* Only consume strictly increasing, overflow-safe timestamp pairs. */
    if (   ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
        && ts - (uint64_t)last < INT64_MAX) {
        double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
        int64_t duration = ts - last;

        if (!st->info->duration_error)
            st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
        if (!st->info->duration_error)
            return AVERROR(ENOMEM);

// if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//     av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
        for (i = 0; i<MAX_STD_TIMEBASES; i++) {
            /* Skip candidates already ruled out (error sum >= 1e10). */
            if (st->info->duration_error[0][1][i] < 1e10) {
                int framerate = get_std_framerate(i);
                double sdts = dts*framerate/(1001*12);
                for (j= 0; j<2; j++) {
                    int64_t ticks = llrint(sdts+j*0.5);
                    double error= sdts - ticks + j*0.5;
                    st->info->duration_error[j][0][i] += error;
                    st->info->duration_error[j][1][i] += error*error;
                }
            }
        }
        st->info->duration_count++;
        /* NOTE(review): upstream also accumulates rfps_duration_sum here;
         * the statement appears elided in this copy. Confirm. */

        /* Every 10 samples, discard candidates whose error variance is
         * too high under both phase hypotheses. */
        if (st->info->duration_count % 10 == 0) {
            int n = st->info->duration_count;
            for (i = 0; i<MAX_STD_TIMEBASES; i++) {
                if (st->info->duration_error[0][1][i] < 1e10) {
                    double a0 = st->info->duration_error[0][0][i] / n;
                    double error0 = st->info->duration_error[0][1][i] / n - a0*a0;
                    double a1 = st->info->duration_error[1][0][i] / n;
                    double error1 = st->info->duration_error[1][1][i] / n - a1*a1;
                    if (error0 > 0.04 && error1 > 0.04) {
                        st->info->duration_error[0][1][i] = 2e10;
                        st->info->duration_error[1][1][i] = 2e10;
                    }
                }
            }
        }

        // ignore the first 4 values, they might have some random jitter
        if (st->info->duration_count > 3 && is_relative(ts) == is_relative(last))
            st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
    }
    if (ts != AV_NOPTS_VALUE)
        st->info->last_dts = ts;

    return 0;
}
3073 
/**
 * Finalize the real-frame-rate (r_frame_rate) estimation for every video
 * stream, using the statistics gathered by ff_rfps_add_frame(), then reset
 * that per-stream estimator state.
 */
void ff_rfps_calculate(AVFormatContext *ic)
{
    int i, j;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];

        if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;
        // the check for tb_unreliable() is not completely correct, since this is not about handling
        // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
        // ipmovie.c produces.
        /* If the observed DTS deltas share a large GCD, derive the rate
         * directly from it. */
        if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
            av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
        if (st->info->duration_count>1 && !st->r_frame_rate.num
            && tb_unreliable(st->codec)) {
            /* Otherwise pick the standard candidate rate with the lowest
             * accumulated error variance. */
            int num = 0;
            double best_error= 0.01;
            AVRational ref_rate = st->r_frame_rate.num ? st->r_frame_rate : av_inv_q(st->time_base);

            for (j= 0; j<MAX_STD_TIMEBASES; j++) {
                int k;

                /* Skip candidates that are inconsistent with the average
                 * frame duration actually observed. */
                if (st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
                    continue;
                if (!st->info->codec_info_duration && get_std_framerate(j) < 1001*12)
                    continue;

                if (av_q2d(st->time_base) * st->info->rfps_duration_sum / st->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j))
                    continue;

                /* Evaluate both phase hypotheses of the tick grid. */
                for (k= 0; k<2; k++) {
                    int n = st->info->duration_count;
                    double a= st->info->duration_error[k][0][j] / n;
                    double error= st->info->duration_error[k][1][j]/n - a*a;

                    if (error < best_error && best_error> 0.000000001) {
                        best_error= error;
                        num = get_std_framerate(j);
                    }
                    if (error < 0.02)
                        av_log(ic, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
                }
            }
            // do not increase frame rate by more than 1 % in order to match a standard rate.
            if (num && (!ref_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(ref_rate)))
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
        }
        /* Promote r_frame_rate to avg_frame_rate when the measured average
         * frame duration agrees with it within one time-base tick. */
        if (   !st->avg_frame_rate.num
            && st->r_frame_rate.num && st->info->rfps_duration_sum
            && st->info->codec_info_duration <= 0
            && st->info->duration_count > 2
            && fabs(1.0 / (av_q2d(st->r_frame_rate) * av_q2d(st->time_base)) - st->info->rfps_duration_sum / (double)st->info->duration_count) <= 1.0
            ) {
            av_log(ic, AV_LOG_DEBUG, "Setting avg frame rate based on r frame rate\n");
            st->avg_frame_rate = st->r_frame_rate;
        }

        /* Reset the estimator state for a possible later pass. */
        av_freep(&st->info->duration_error);
        st->info->last_dts = AV_NOPTS_VALUE;
        st->info->duration_count = 0;
        st->info->rfps_duration_sum = 0;
    }
}
3138 
3140 {
3141  int i, count, ret = 0, j;
3142  int64_t read_size;
3143  AVStream *st;
3144  AVPacket pkt1, *pkt;
3145  int64_t old_offset = avio_tell(ic->pb);
3146  // new streams might appear, no options for those
3147  int orig_nb_streams = ic->nb_streams;
3148  int flush_codecs;
3149  int64_t max_analyze_duration = ic->max_analyze_duration;
3150  int64_t max_stream_analyze_duration;
3151  int64_t max_subtitle_analyze_duration;
3152  int64_t probesize = ic->probesize;
3153 
3154  flush_codecs = probesize > 0;
3155 
3156  av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN);
3157 
3158  max_stream_analyze_duration = max_analyze_duration;
3159  max_subtitle_analyze_duration = max_analyze_duration;
3160  if (!max_analyze_duration) {
3161  max_stream_analyze_duration =
3162  max_analyze_duration = 5*AV_TIME_BASE;
3163  max_subtitle_analyze_duration = 30*AV_TIME_BASE;
3164  if (!strcmp(ic->iformat->name, "flv"))
3165  max_stream_analyze_duration = 90*AV_TIME_BASE;
3166  }
3167 
3168  if (ic->pb)
3169  av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d\n",
3170  avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count);
3171 
3172  for (i = 0; i < ic->nb_streams; i++) {
3173  const AVCodec *codec;
3174  AVDictionary *thread_opt = NULL;
3175  st = ic->streams[i];
3176 
3177  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3179 /* if (!st->time_base.num)
3180  st->time_base = */
3181  if (!st->codec->time_base.num)
3182  st->codec->time_base = st->time_base;
3183  }
3184  // only for the split stuff
3185  if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE) && st->request_probe <= 0) {
3186  st->parser = av_parser_init(st->codec->codec_id);
3187  if (st->parser) {
3188  if (st->need_parsing == AVSTREAM_PARSE_HEADERS) {
3190  } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
3192  }
3193  } else if (st->need_parsing) {
3194  av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
3195  "%s, packets or times may be invalid.\n",
3197  }
3198  }
3199  codec = find_decoder(ic, st, st->codec->codec_id);
3200 
3201  /* Force thread count to 1 since the H.264 decoder will not extract
3202  * SPS and PPS to extradata during multi-threaded decoding. */
3203  av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
3204 
3205  if (ic->codec_whitelist)
3206  av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0);
3207 
3208  /* Ensure that subtitle_header is properly set. */
3210  && codec && !st->codec->codec) {
3211  if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3212  av_log(ic, AV_LOG_WARNING,
3213  "Failed to open codec in av_find_stream_info\n");
3214  }
3215 
3216  // Try to just open decoders, in case this is enough to get parameters.
3217  if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
3218  if (codec && !st->codec->codec)
3219  if (avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt) < 0)
3220  av_log(ic, AV_LOG_WARNING,
3221  "Failed to open codec in av_find_stream_info\n");
3222  }
3223  if (!options)
3224  av_dict_free(&thread_opt);
3225  }
3226 
3227  for (i = 0; i < ic->nb_streams; i++) {
3228 #if FF_API_R_FRAME_RATE
3229  ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
3230 #endif
3233  }
3234 
3235  count = 0;
3236  read_size = 0;
3237  for (;;) {
3238  int analyzed_all_streams;
3240  ret = AVERROR_EXIT;
3241  av_log(ic, AV_LOG_DEBUG, "interrupted\n");
3242  break;
3243  }
3244 
3245  /* check if one codec still needs to be handled */
3246  for (i = 0; i < ic->nb_streams; i++) {
3247  int fps_analyze_framecount = 20;
3248 
3249  st = ic->streams[i];
3250  if (!has_codec_parameters(st, NULL))
3251  break;
3252  /* If the timebase is coarse (like the usual millisecond precision
3253  * of mkv), we need to analyze more frames to reliably arrive at
3254  * the correct fps. */
3255  if (av_q2d(st->time_base) > 0.0005)
3256  fps_analyze_framecount *= 2;
3257  if (!tb_unreliable(st->codec))
3258  fps_analyze_framecount = 0;
3259  if (ic->fps_probe_size >= 0)
3260  fps_analyze_framecount = ic->fps_probe_size;
3262  fps_analyze_framecount = 0;
3263  /* variable fps and no guess at the real fps */
3264  if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&
3266  int count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ?
3268  st->info->duration_count;
3269  if (count < fps_analyze_framecount)
3270  break;
3271  }
3272  if (st->parser && st->parser->parser->split &&
3273  !st->codec->extradata)
3274  break;
3275  if (st->first_dts == AV_NOPTS_VALUE &&
3276  !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
3277  st->codec_info_nb_frames < ic->max_ts_probe &&
3278  (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3280  break;
3281  }
3282  analyzed_all_streams = 0;
3283  if (i == ic->nb_streams) {
3284  analyzed_all_streams = 1;
3285  /* NOTE: If the format has no header, then we need to read some
3286  * packets to get most of the streams, so we cannot stop here. */
3287  if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
3288  /* If we found the info for all the codecs, we can stop. */
3289  ret = count;
3290  av_log(ic, AV_LOG_DEBUG, "All info found\n");
3291  flush_codecs = 0;
3292  break;
3293  }
3294  }
3295  /* We did not get all the codec info, but we read too much data. */
3296  if (read_size >= probesize) {
3297  ret = count;
3298  av_log(ic, AV_LOG_DEBUG,
3299  "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);
3300  for (i = 0; i < ic->nb_streams; i++)
3301  if (!ic->streams[i]->r_frame_rate.num &&
3302  ic->streams[i]->info->duration_count <= 1 &&
3304  strcmp(ic->iformat->name, "image2"))
3305  av_log(ic, AV_LOG_WARNING,
3306  "Stream #%d: not enough frames to estimate rate; "
3307  "consider increasing probesize\n", i);
3308  break;
3309  }
3310 
3311  /* NOTE: A new stream can be added there if no header in file
3312  * (AVFMTCTX_NOHEADER). */
3313  ret = read_frame_internal(ic, &pkt1);
3314  if (ret == AVERROR(EAGAIN))
3315  continue;
3316 
3317  if (ret < 0) {
3318  /* EOF or error*/
3319  break;
3320  }
3321 
3322  pkt = &pkt1;
3323 
3324  if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
3325  ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
3326  &ic->internal->packet_buffer_end, 0);
3327  if (ret < 0)
3328  goto find_stream_info_err;
3329  }
3330 
3331  st = ic->streams[pkt->stream_index];
3333  read_size += pkt->size;
3334 
3335  if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
3336  /* check for non-increasing dts */
3337  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3338  st->info->fps_last_dts >= pkt->dts) {
3339  av_log(ic, AV_LOG_DEBUG,
3340  "Non-increasing DTS in stream %d: packet %d with DTS "
3341  "%"PRId64", packet %d with DTS %"PRId64"\n",
3342  st->index, st->info->fps_last_dts_idx,
3344  pkt->dts);
3345  st->info->fps_first_dts =
3347  }
3348  /* Check for a discontinuity in dts. If the difference in dts
3349  * is more than 1000 times the average packet duration in the
3350  * sequence, we treat it as a discontinuity. */
3351  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3353  (pkt->dts - st->info->fps_last_dts) / 1000 >
3354  (st->info->fps_last_dts - st->info->fps_first_dts) /
3355  (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
3356  av_log(ic, AV_LOG_WARNING,
3357  "DTS discontinuity in stream %d: packet %d with DTS "
3358  "%"PRId64", packet %d with DTS %"PRId64"\n",
3359  st->index, st->info->fps_last_dts_idx,
3361  pkt->dts);
3362  st->info->fps_first_dts =
3364  }
3365 
3366  /* update stored dts values */
3367  if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
3368  st->info->fps_first_dts = pkt->dts;
3370  }
3371  st->info->fps_last_dts = pkt->dts;
3373  }
3374  if (st->codec_info_nb_frames>1) {
3375  int64_t t = 0;
3376  int64_t limit;
3377 
3378  if (st->time_base.den > 0)
3380  if (st->avg_frame_rate.num > 0)
3382 
3383  if ( t == 0
3384  && st->codec_info_nb_frames>30
3385  && st->info->fps_first_dts != AV_NOPTS_VALUE
3386  && st->info->fps_last_dts != AV_NOPTS_VALUE)
3388 
3389  if (analyzed_all_streams) limit = max_analyze_duration;
3390  else if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) limit = max_subtitle_analyze_duration;
3391  else limit = max_stream_analyze_duration;
3392 
3393  if (t >= limit) {
3394  av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds st:%d\n",
3395  limit,
3396  t, pkt->stream_index);
3397  if (ic->flags & AVFMT_FLAG_NOBUFFER)
3398  av_packet_unref(pkt);
3399  break;
3400  }
3401  if (pkt->duration) {
3402  if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE && pkt->pts != AV_NOPTS_VALUE && pkt->pts >= st->start_time) {
3403  st->info->codec_info_duration = FFMIN(pkt->pts - st->start_time, st->info->codec_info_duration + pkt->duration);
3404  } else
3405  st->info->codec_info_duration += pkt->duration;
3406  st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame ==2 ? st->parser->repeat_pict + 1 : 2;
3407  }
3408  }
3409 #if FF_API_R_FRAME_RATE
3410  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3411  ff_rfps_add_frame(ic, st, pkt->dts);
3412 #endif
3413  if (st->parser && st->parser->parser->split && !st->codec->extradata) {
3414  int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
3415  if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
3416  if (ff_alloc_extradata(st->codec, i))
3417  return AVERROR(ENOMEM);
3418  memcpy(st->codec->extradata, pkt->data,
3419  st->codec->extradata_size);
3420  }
3421  }
3422 
3423  /* If still no information, we try to open the codec and to
3424  * decompress the frame. We try to avoid that in most cases as
3425  * it takes longer and uses more memory. For MPEG-4, we need to
3426  * decompress for QuickTime.
3427  *
3428  * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
3429  * least one frame of codec data, this makes sure the codec initializes
3430  * the channel configuration and does not only trust the values from
3431  * the container. */
3432  try_decode_frame(ic, st, pkt,
3433  (options && i < orig_nb_streams) ? &options[i] : NULL);
3434 
3435  if (ic->flags & AVFMT_FLAG_NOBUFFER)
3436  av_packet_unref(pkt);
3437 
3438  st->codec_info_nb_frames++;
3439  count++;
3440  }
3441 
3442  if (flush_codecs) {
3443  AVPacket empty_pkt = { 0 };
3444  int err = 0;
3445  av_init_packet(&empty_pkt);
3446 
3447  for (i = 0; i < ic->nb_streams; i++) {
3448 
3449  st = ic->streams[i];
3450 
3451  /* flush the decoders */
3452  if (st->info->found_decoder == 1) {
3453  do {
3454  err = try_decode_frame(ic, st, &empty_pkt,
3455  (options && i < orig_nb_streams)
3456  ? &options[i] : NULL);
3457  } while (err > 0 && !has_codec_parameters(st, NULL));
3458 
3459  if (err < 0) {
3460  av_log(ic, AV_LOG_INFO,
3461  "decoding for stream %d failed\n", st->index);
3462  }
3463  }
3464  }
3465  }
3466 
3467  // close codecs which were opened in try_decode_frame()
3468  for (i = 0; i < ic->nb_streams; i++) {
3469  st = ic->streams[i];
3470  avcodec_close(st->codec);
3471  }
3472 
3473  ff_rfps_calculate(ic);
3474 
3475  for (i = 0; i < ic->nb_streams; i++) {
3476  st = ic->streams[i];
3477  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3481  st->codec->codec_tag= tag;
3482  }
3483 
3484  /* estimate average framerate if not set by demuxer */
3485  if (st->info->codec_info_duration_fields &&
3486  !st->avg_frame_rate.num &&
3487  st->info->codec_info_duration) {
3488  int best_fps = 0;
3489  double best_error = 0.01;
3490 
3491  if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3492  st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3493  st->info->codec_info_duration < 0)
3494  continue;
3496  st->info->codec_info_duration_fields * (int64_t) st->time_base.den,
3497  st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000);
3498 
3499  /* Round guessed framerate to a "standard" framerate if it's
3500  * within 1% of the original estimate. */
3501  for (j = 0; j < MAX_STD_TIMEBASES; j++) {
3502  AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
3503  double error = fabs(av_q2d(st->avg_frame_rate) /
3504  av_q2d(std_fps) - 1);
3505 
3506  if (error < best_error) {
3507  best_error = error;
3508  best_fps = std_fps.num;
3509  }
3510  }
3511  if (best_fps)
3513  best_fps, 12 * 1001, INT_MAX);
3514  }
3515 
3516  if (!st->r_frame_rate.num) {
3517  if ( st->codec->time_base.den * (int64_t) st->time_base.num
3518  <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t) st->time_base.den) {
3519  st->r_frame_rate.num = st->codec->time_base.den;
3521  } else {
3522  st->r_frame_rate.num = st->time_base.den;
3523  st->r_frame_rate.den = st->time_base.num;
3524  }
3525  }
3527  AVRational hw_ratio = { st->codec->height, st->codec->width };
3529  hw_ratio);
3530  }
3531  } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3532  if (!st->codec->bits_per_coded_sample)
3535  // set stream disposition based on audio service type
3536  switch (st->codec->audio_service_type) {
3539  break;
3542  break;
3545  break;
3548  break;
3551  break;
3552  }
3553  }
3554  }
3555 
3556  if (probesize)
3557  estimate_timings(ic, old_offset);
3558 
3559  av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN);
3560 
3561  if (ret >= 0 && ic->nb_streams)
3562  /* We could not have all the codec parameters before EOF. */
3563  ret = -1;
3564  for (i = 0; i < ic->nb_streams; i++) {
3565  const char *errmsg;
3566  st = ic->streams[i];
3567  if (!has_codec_parameters(st, &errmsg)) {
3568  char buf[256];
3569  avcodec_string(buf, sizeof(buf), st->codec, 0);
3570  av_log(ic, AV_LOG_WARNING,
3571  "Could not find codec parameters for stream %d (%s): %s\n"
3572  "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3573  i, buf, errmsg);
3574  } else {
3575  ret = 0;
3576  }
3577  }
3578 
3580 
3581 find_stream_info_err:
3582  for (i = 0; i < ic->nb_streams; i++) {
3583  st = ic->streams[i];
3584  if (ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3585  ic->streams[i]->codec->thread_count = 0;
3586  if (st->info)
3587  av_freep(&st->info->duration_error);
3588  av_freep(&ic->streams[i]->info);
3589  }
3590  if (ic->pb)
3591  av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",
3592  avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);
3593  return ret;
3594 }
3595 
3596 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3597 {
3598  int i, j;
3599 
3600  for (i = 0; i < ic->nb_programs; i++) {
3601  if (ic->programs[i] == last) {
3602  last = NULL;
3603  } else {
3604  if (!last)
3605  for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3606  if (ic->programs[i]->stream_index[j] == s)
3607  return ic->programs[i];
3608  }
3609  }
3610  return NULL;
3611 }
3612 
3613 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
3614  int wanted_stream_nb, int related_stream,
3615  AVCodec **decoder_ret, int flags)
3616 {
3617  int i, nb_streams = ic->nb_streams;
3618  int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3619  unsigned *program = NULL;
3620  const AVCodec *decoder = NULL, *best_decoder = NULL;
3621 
3622  if (related_stream >= 0 && wanted_stream_nb < 0) {
3623  AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3624  if (p) {
3625  program = p->stream_index;
3626  nb_streams = p->nb_stream_indexes;
3627  }
3628  }
3629  for (i = 0; i < nb_streams; i++) {
3630  int real_stream_index = program ? program[i] : i;
3631  AVStream *st = ic->streams[real_stream_index];
3632  AVCodecContext *avctx = st->codec;
3633  if (avctx->codec_type != type)
3634  continue;
3635  if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3636  continue;
3637  if (wanted_stream_nb != real_stream_index &&
3640  continue;
3641  if (type == AVMEDIA_TYPE_AUDIO && !(avctx->channels && avctx->sample_rate))
3642  continue;
3643  if (decoder_ret) {
3644  decoder = find_decoder(ic, st, st->codec->codec_id);
3645  if (!decoder) {
3646  if (ret < 0)
3648  continue;
3649  }
3650  }
3652  bitrate = avctx->bit_rate;
3653  if (!bitrate)
3654  bitrate = avctx->rc_max_rate;
3655  multiframe = FFMIN(5, count);
3656  if ((best_multiframe > multiframe) ||
3657  (best_multiframe == multiframe && best_bitrate > bitrate) ||
3658  (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3659  continue;
3660  best_count = count;
3661  best_bitrate = bitrate;
3662  best_multiframe = multiframe;
3663  ret = real_stream_index;
3664  best_decoder = decoder;
3665  if (program && i == nb_streams - 1 && ret < 0) {
3666  program = NULL;
3667  nb_streams = ic->nb_streams;
3668  /* no related stream found, try again with everything */
3669  i = 0;
3670  }
3671  }
3672  if (decoder_ret)
3673  *decoder_ret = (AVCodec*)best_decoder;
3674  return ret;
3675 }
3676 
3677 /*******************************************************/
3678 
3679 int av_read_play(AVFormatContext *s)
3680 {
3681  if (s->iformat->read_play)
3682  return s->iformat->read_play(s);
3683  if (s->pb)
3684  return avio_pause(s->pb, 0);
3685  return AVERROR(ENOSYS);
3686 }
3687 
3688 int av_read_pause(AVFormatContext *s)
3689 {
3690  if (s->iformat->read_pause)
3691  return s->iformat->read_pause(s);
3692  if (s->pb)
3693  return avio_pause(s->pb, 1);
3694  return AVERROR(ENOSYS);
3695 }
3696 
3697 static void free_stream(AVStream **pst)
3698 {
3699  AVStream *st = *pst;
3700  int i;
3701 
3702  if (!st)
3703  return;
3704 
3705  for (i = 0; i < st->nb_side_data; i++)
3706  av_freep(&st->side_data[i].data);
3707  av_freep(&st->side_data);
3708 
3709  if (st->parser)
3710  av_parser_close(st->parser);
3711 
3712  if (st->attached_pic.data)
3714 
3715  av_freep(&st->internal);
3716 
3717  av_dict_free(&st->metadata);
3718  av_freep(&st->probe_data.buf);
3719  av_freep(&st->index_entries);
3720  av_freep(&st->codec->extradata);
3722  av_freep(&st->codec);
3723  av_freep(&st->priv_data);
3724  if (st->info)
3725  av_freep(&st->info->duration_error);
3726  av_freep(&st->info);
3728  av_freep(&st->priv_pts);
3729 
3730  av_freep(pst);
3731 }
3732 
3733 void ff_free_stream(AVFormatContext *s, AVStream *st)
3734 {
3735  av_assert0(s->nb_streams>0);
3736  av_assert0(s->streams[ s->nb_streams - 1 ] == st);
3737 
3738  free_stream(&s->streams[ --s->nb_streams ]);
3739 }
3740 
3741 void avformat_free_context(AVFormatContext *s)
3742 {
3743  int i;
3744 
3745  if (!s)
3746  return;
3747 
3748  av_opt_free(s);
3749  if (s->iformat && s->iformat->priv_class && s->priv_data)
3750  av_opt_free(s->priv_data);
3751  if (s->oformat && s->oformat->priv_class && s->priv_data)
3752  av_opt_free(s->priv_data);
3753 
3754  for (i = s->nb_streams - 1; i >= 0; i--)
3755  ff_free_stream(s, s->streams[i]);
3756 
3757 
3758  for (i = s->nb_programs - 1; i >= 0; i--) {
3759  av_dict_free(&s->programs[i]->metadata);
3760  av_freep(&s->programs[i]->stream_index);
3761  av_freep(&s->programs[i]);
3762  }
3763  av_freep(&s->programs);
3764  av_freep(&s->priv_data);
3765  while (s->nb_chapters--) {
3767  av_freep(&s->chapters[s->nb_chapters]);
3768  }
3769  av_freep(&s->chapters);
3770  av_dict_free(&s->metadata);
3771  av_freep(&s->streams);
3772  av_freep(&s->internal);
3773  flush_packet_queue(s);
3774  av_free(s);
3775 }
3776 
3777 void avformat_close_input(AVFormatContext **ps)
3778 {
3779  AVFormatContext *s;
3780  AVIOContext *pb;
3781 
3782  if (!ps || !*ps)
3783  return;
3784 
3785  s = *ps;
3786  pb = s->pb;
3787 
3788  if ((s->iformat && strcmp(s->iformat->name, "image2") && s->iformat->flags & AVFMT_NOFILE) ||
3789  (s->flags & AVFMT_FLAG_CUSTOM_IO))
3790  pb = NULL;
3791 
3792  flush_packet_queue(s);
3793 
3794  if (s->iformat)
3795  if (s->iformat->read_close)
3796  s->iformat->read_close(s);
3797 
3799 
3800  *ps = NULL;
3801 
3802  avio_close(pb);
3803 }
3804 
3805 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3806 {
3807  AVStream *st;
3808  int i;
3809  AVStream **streams;
3810 
3811  if (s->nb_streams >= INT_MAX/sizeof(*streams))
3812  return NULL;
3813  streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3814  if (!streams)
3815  return NULL;
3816  s->streams = streams;
3817 
3818  st = av_mallocz(sizeof(AVStream));
3819  if (!st)
3820  return NULL;
3821  if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3822  av_free(st);
3823  return NULL;
3824  }
3825  st->info->last_dts = AV_NOPTS_VALUE;
3826 
3827  st->codec = avcodec_alloc_context3(c);
3828  if (!st->codec) {
3829  av_free(st->info);
3830  av_free(st);
3831  return NULL;
3832  }
3833 
3834  st->internal = av_mallocz(sizeof(*st->internal));
3835  if (!st->internal)
3836  goto fail;
3837 
3838  if (s->iformat) {
3839  /* no default bitrate if decoding */
3840  st->codec->bit_rate = 0;
3841 
3842  /* default pts setting is MPEG-like */
3843  avpriv_set_pts_info(st, 33, 1, 90000);
3844  /* we set the current DTS to 0 so that formats without any timestamps
3845  * but durations get some timestamps, formats with some unknown
3846  * timestamps have their first few packets buffered and the
3847  * timestamps corrected before they are returned to the user */
3848  st->cur_dts = RELATIVE_TS_BASE;
3849  } else {
3850  st->cur_dts = AV_NOPTS_VALUE;
3851  }
3852 
3853  st->index = s->nb_streams;
3854  st->start_time = AV_NOPTS_VALUE;
3855  st->duration = AV_NOPTS_VALUE;
3856  st->first_dts = AV_NOPTS_VALUE;
3860 
3863  for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
3864  st->pts_buffer[i] = AV_NOPTS_VALUE;
3865 
3866  st->sample_aspect_ratio = (AVRational) { 0, 1 };
3867 
3868 #if FF_API_R_FRAME_RATE
3869  st->info->last_dts = AV_NOPTS_VALUE;
3870 #endif
3873 
3875 
3876  s->streams[s->nb_streams++] = st;
3877  return st;
3878 fail:
3879  free_stream(&st);
3880  return NULL;
3881 }
3882 
3883 AVProgram *av_new_program(AVFormatContext *ac, int id)
3884 {
3885  AVProgram *program = NULL;
3886  int i;
3887 
3888  av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
3889 
3890  for (i = 0; i < ac->nb_programs; i++)
3891  if (ac->programs[i]->id == id)
3892  program = ac->programs[i];
3893 
3894  if (!program) {
3895  program = av_mallocz(sizeof(AVProgram));
3896  if (!program)
3897  return NULL;
3898  dynarray_add(&ac->programs, &ac->nb_programs, program);
3899  program->discard = AVDISCARD_NONE;
3900  }
3901  program->id = id;
3904 
3905  program->start_time =
3906  program->end_time = AV_NOPTS_VALUE;
3907 
3908  return program;
3909 }
3910 
3911 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
3912  int64_t start, int64_t end, const char *title)
3913 {
3914  AVChapter *chapter = NULL;
3915  int i;
3916 
3917  if (end != AV_NOPTS_VALUE && start > end) {
3918  av_log(s, AV_LOG_ERROR, "Chapter end time %"PRId64" before start %"PRId64"\n", end, start);
3919  return NULL;
3920  }
3921 
3922  for (i = 0; i < s->nb_chapters; i++)
3923  if (s->chapters[i]->id == id)
3924  chapter = s->chapters[i];
3925 
3926  if (!chapter) {
3927  chapter = av_mallocz(sizeof(AVChapter));
3928  if (!chapter)
3929  return NULL;
3930  dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3931  }
3932  av_dict_set(&chapter->metadata, "title", title, 0);
3933  chapter->id = id;
3934  chapter->time_base = time_base;
3935  chapter->start = start;
3936  chapter->end = end;
3937 
3938  return chapter;
3939 }
3940 
3941 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
3942 {
3943  int i, j;
3944  AVProgram *program = NULL;
3945  void *tmp;
3946 
3947  if (idx >= ac->nb_streams) {
3948  av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3949  return;
3950  }
3951 
3952  for (i = 0; i < ac->nb_programs; i++) {
3953  if (ac->programs[i]->id != progid)
3954  continue;
3955  program = ac->programs[i];
3956  for (j = 0; j < program->nb_stream_indexes; j++)
3957  if (program->stream_index[j] == idx)
3958  return;
3959 
3960  tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3961  if (!tmp)
3962  return;
3963  program->stream_index = tmp;
3964  program->stream_index[program->nb_stream_indexes++] = idx;
3965  return;
3966  }
3967 }
3968 
3969 uint64_t ff_ntp_time(void)
3970 {
3971  return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3972 }
3973 
/*
 * Expand the single "%[0N]d" pattern in 'path' with 'number' into 'buf'.
 * "%%" emits a literal '%'. Exactly one %d must be present; any other
 * conversion, a second %d, or overflow of buf yields -1.
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *src = path;
    char *dst = buf;
    char numbuf[20];
    int found_d = 0;

    for (;;) {
        char c = *src++;
        if (c == '\0')
            break;
        if (c != '%')
            goto copy_char;

        {
            int width, len;

            /* Collect an optional zero-padding width between '%' and 'd'.
             * (av_isdigit is a locale-independent ASCII test; inlined here.) */
            do {
                width = 0;
                while (*src >= '0' && *src <= '9')
                    width = width * 10 + *src++ - '0';
                c = *src++;
            } while (c >= '0' && c <= '9');

            switch (c) {
            case '%':
                goto copy_char;         /* "%%" -> literal '%' */
            case 'd':
                if (found_d)
                    goto fail;          /* only one %d allowed */
                found_d = 1;
                if (number < 0)
                    width += 1;         /* room for the minus sign */
                snprintf(numbuf, sizeof(numbuf), "%0*d", width, number);
                len = strlen(numbuf);
                if ((dst - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(dst, numbuf, len);
                dst += len;
                break;
            default:
                goto fail;              /* unsupported conversion */
            }
        }
        continue;

copy_char:
        if ((dst - buf) < buf_size - 1)
            *dst++ = c;
    }
    if (!found_d)
        goto fail;
    *dst = '\0';
    return 0;
fail:
    *dst = '\0';
    return -1;
}
4028 
/*
 * Split a URL of the form proto://user:pass@host:port/path?query into its
 * components. Outputs that are absent in the URL are left empty / -1.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *cur, *path_start, *query, *at, *auth_start, *col, *brk;

    /* Default every output to "not present". */
    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* Protocol: everything before the first ':'. */
    cur = strchr(url, ':');
    if (!cur) {
        /* No scheme: the whole input is a plain file path. */
        av_strlcpy(path, url, path_size);
        return;
    }
    av_strlcpy(proto, url, FFMIN(proto_size, cur + 1 - url));
    cur++;                      /* skip ':' */
    if (*cur == '/')
        cur++;
    if (*cur == '/')
        cur++;

    /* The path begins at the first '/' or '?', whichever comes first. */
    path_start = strchr(cur, '/');
    query      = strchr(cur, '?');
    if (!path_start)
        path_start = query;
    else if (query)
        path_start = FFMIN(path_start, query);
    if (path_start)
        av_strlcpy(path, path_start, path_size);
    else
        path_start = cur + strlen(cur);

    /* Between cur and path_start lies [user[:pass]@]host[:port]. */
    if (path_start != cur) {
        /* Authorization: everything up to the LAST '@' before the path. */
        auth_start = cur;
        while ((at = strchr(cur, '@')) && at < path_start) {
            av_strlcpy(authorization, auth_start,
                       FFMIN(authorization_size, at + 1 - auth_start));
            cur = at + 1;       /* skip '@' */
        }

        if (*cur == '[' && (brk = strchr(cur, ']')) && brk < path_start) {
            /* Bracketed numeric host: [host]:port */
            av_strlcpy(hostname, cur + 1, FFMIN(hostname_size, brk - cur));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(cur, ':')) && col < path_start) {
            av_strlcpy(hostname, cur, FFMIN(col + 1 - cur, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else {
            av_strlcpy(hostname, cur, FFMIN(path_start + 1 - cur, hostname_size));
        }
    }
}
4099 
/*
 * Write the hex representation of s bytes of src into buff (two characters
 * per byte). No NUL terminator is written; the caller must provide one.
 * Returns buff.
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char hex_uc[] = "0123456789ABCDEF";
    static const char hex_lc[] = "0123456789abcdef";
    const char *digits = lowercase ? hex_lc : hex_uc;
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = digits[src[i] >> 4];
        *out++ = digits[src[i] & 0xF];
    }

    return buff;
}
4120 
/*
 * Parse hex digits (case-insensitive, whitespace allowed between them)
 * into bytes. Stops at the first non-hex, non-space character. If data is
 * NULL only the byte count is computed. Returns the number of full bytes.
 */
int ff_hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int acc = 1;        /* shift register; bit 8 set once two nibbles are in */

    for (;;) {
        int c;

        /* SPACE_CHARS equivalent: locale-independent whitespace. */
        p += strspn(p, " \t\r\n");
        if (*p == '\0')
            break;
        c = (unsigned char) *p++;
        if (c >= 'a' && c <= 'z')       /* av_toupper: ASCII-only upcasing */
            c -= 'a' - 'A';
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c -= 'A' - 10;
        else
            break;                      /* first non-hex char terminates */
        acc = (acc << 4) | c;
        if (acc & 0x100) {
            if (data)
                data[len] = acc;        /* low 8 bits are the decoded byte */
            len++;
            acc = 1;
        }
    }
    return len;
}
4148 
4149 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4150  unsigned int pts_num, unsigned int pts_den)
4151 {
4152  AVRational new_tb;
4153  if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
4154  if (new_tb.num != pts_num)
4156  "st:%d removing common factor %d from timebase\n",
4157  s->index, pts_num / new_tb.num);
4158  } else
4160  "st:%d has too large timebase, reducing\n", s->index);
4161 
4162  if (new_tb.num <= 0 || new_tb.den <= 0) {
4164  "Ignoring attempt to set invalid timebase %d/%d for st:%d\n",
4165  new_tb.num, new_tb.den,
4166  s->index);
4167  return;
4168  }
4169  s->time_base = new_tb;
4170  av_codec_set_pkt_timebase(s->codec, new_tb);
4171  s->pts_wrap_bits = pts_wrap_bits;
4172 }
4173 
4174 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4175  void *context)
4176 {
4177  const char *ptr = str;
4178 
4179  /* Parse key=value pairs. */
4180  for (;;) {
4181  const char *key;
4182  char *dest = NULL, *dest_end;
4183  int key_len, dest_len = 0;
4184 
4185  /* Skip whitespace and potential commas. */
4186  while (*ptr && (av_isspace(*ptr) || *ptr == ','))
4187  ptr++;
4188  if (!*ptr)
4189  break;
4190 
4191  key = ptr;
4192 
4193  if (!(ptr = strchr(key, '=')))
4194  break;
4195  ptr++;
4196  key_len = ptr - key;
4197 
4198  callback_get_buf(context, key, key_len, &dest, &dest_len);
4199  dest_end = dest + dest_len - 1;
4200 
4201  if (*ptr == '\"') {
4202  ptr++;
4203  while (*ptr && *ptr != '\"') {
4204  if (*ptr == '\\') {
4205  if (!ptr[1])
4206  break;
4207  if (dest && dest < dest_end)
4208  *dest++ = ptr[1];
4209  ptr += 2;
4210  } else {
4211  if (dest && dest < dest_end)
4212  *dest++ = *ptr;
4213  ptr++;
4214  }
4215  }
4216  if (*ptr == '\"')
4217  ptr++;
4218  } else {
4219  for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
4220  if (dest && dest < dest_end)
4221  *dest++ = *ptr;
4222  }
4223  if (dest)
4224  *dest = 0;
4225  }
4226 }
4227 
4228 int ff_find_stream_index(AVFormatContext *s, int id)
4229 {
4230  int i;
4231  for (i = 0; i < s->nb_streams; i++)
4232  if (s->streams[i]->id == id)
4233  return i;
4234  return -1;
4235 }
4236 
4238  int std_compliance)
4239 {
4240  if (ofmt) {
4241  unsigned int codec_tag;
4242  if (ofmt->query_codec)
4243  return ofmt->query_codec(codec_id, std_compliance);
4244  else if (ofmt->codec_tag)
4245  return !!av_codec_get_tag2(ofmt->codec_tag, codec_id, &codec_tag);
4246  else if (codec_id == ofmt->video_codec ||
4247  codec_id == ofmt->audio_codec ||
4248  codec_id == ofmt->subtitle_codec)
4249  return 1;
4250  }
4251  return AVERROR_PATCHWELCOME;
4252 }
4253 
/**
 * Globally initialize network components (a no-op without CONFIG_NETWORK).
 *
 * NOTE(review): the scraped source dropped the signature line and the
 * ff_network_inited_globally flag; restored to match upstream FFmpeg.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    if ((ret = ff_tls_init()) < 0)
        return ret;
#endif
    return 0;
}
4266 
/**
 * Undo avformat_network_init() (a no-op without CONFIG_NETWORK).
 *
 * NOTE(review): the scraped source dropped the signature line and the
 * ff_network_inited_globally reset; restored to match upstream FFmpeg.
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
    ff_network_inited_globally = 0;
#endif
    return 0;
}
4276 
4278  uint64_t channel_layout, int32_t sample_rate,
4280 {
4281  uint32_t flags = 0;
4282  int size = 4;
4283  uint8_t *data;
4284  if (!pkt)
4285  return AVERROR(EINVAL);
4286  if (channels) {
4287  size += 4;
4289  }
4290  if (channel_layout) {
4291  size += 8;
4293  }
4294  if (sample_rate) {
4295  size += 4;
4297  }
4298  if (width || height) {
4299  size += 8;
4301  }
4303  if (!data)
4304  return AVERROR(ENOMEM);
4305  bytestream_put_le32(&data, flags);
4306  if (channels)
4307  bytestream_put_le32(&data, channels);
4308  if (channel_layout)
4309  bytestream_put_le64(&data, channel_layout);
4310  if (sample_rate)
4311  bytestream_put_le32(&data, sample_rate);
4312  if (width || height) {
4313  bytestream_put_le32(&data, width);
4314  bytestream_put_le32(&data, height);
4315  }
4316  return 0;
4317 }
4318 
4319 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4320 {
4321  AVRational undef = {0, 1};
4322  AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4323  AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4324  AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4325 
4326  av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4327  stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4328  if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4329  stream_sample_aspect_ratio = undef;
4330 
4331  av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4332  frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4333  if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4334  frame_sample_aspect_ratio = undef;
4335 
4336  if (stream_sample_aspect_ratio.num)
4337  return stream_sample_aspect_ratio;
4338  else
4339  return frame_sample_aspect_ratio;
4340 }
4341 
4342 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4343 {
4344  AVRational fr = st->r_frame_rate;
4345  AVRational codec_fr = st->codec->framerate;
4346  AVRational avg_fr = st->avg_frame_rate;
4347 
4348  if (avg_fr.num > 0 && avg_fr.den > 0 && fr.num > 0 && fr.den > 0 &&
4349  av_q2d(avg_fr) < 70 && av_q2d(fr) > 210) {
4350  fr = avg_fr;
4351  }
4352 
4353 
4354  if (st->codec->ticks_per_frame > 1) {
4355  if ( codec_fr.num > 0 && codec_fr.den > 0 &&
4356  (fr.num == 0 || av_q2d(codec_fr) < av_q2d(fr)*0.7 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1))
4357  fr = codec_fr;
4358  }
4359 
4360  return fr;
4361 }
4362 
4363 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4364  const char *spec)
4365 {
4366  if (*spec <= '9' && *spec >= '0') /* opt:index */
4367  return strtol(spec, NULL, 0) == st->index;
4368  else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4369  *spec == 't' || *spec == 'V') { /* opt:[vasdtV] */
4370  enum AVMediaType type;
4371  int nopic = 0;
4372 
4373  switch (*spec++) {
4374  case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4375  case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4376  case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4377  case 'd': type = AVMEDIA_TYPE_DATA; break;
4378  case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4379  case 'V': type = AVMEDIA_TYPE_VIDEO; nopic = 1; break;
4380  default: av_assert0(0);
4381  }
4382  if (type != st->codec->codec_type)
4383  return 0;
4384  if (nopic && (st->disposition & AV_DISPOSITION_ATTACHED_PIC))
4385  return 0;
4386  if (*spec++ == ':') { /* possibly followed by :index */
4387  int i, index = strtol(spec, NULL, 0);
4388  for (i = 0; i < s->nb_streams; i++)
4389  if (s->streams[i]->codec->codec_type == type &&
4390  !(nopic && (st->disposition & AV_DISPOSITION_ATTACHED_PIC)) &&
4391  index-- == 0)
4392  return i == st->index;
4393  return 0;
4394  }
4395  return 1;
4396  } else if (*spec == 'p' && *(spec + 1) == ':') {
4397  int prog_id, i, j;
4398  char *endptr;
4399  spec += 2;
4400  prog_id = strtol(spec, &endptr, 0);
4401  for (i = 0; i < s->nb_programs; i++) {
4402  if (s->programs[i]->id != prog_id)
4403  continue;
4404 
4405  if (*endptr++ == ':') {
4406  int stream_idx = strtol(endptr, NULL, 0);
4407  return stream_idx >= 0 &&
4408  stream_idx < s->programs[i]->nb_stream_indexes &&
4409  st->index == s->programs[i]->stream_index[stream_idx];
4410  }
4411 
4412  for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4413  if (st->index == s->programs[i]->stream_index[j])
4414  return 1;
4415  }
4416  return 0;
4417  } else if (*spec == '#' ||
4418  (*spec == 'i' && *(spec + 1) == ':')) {
4419  int stream_id;
4420  char *endptr;
4421  spec += 1 + (*spec == 'i');
4422  stream_id = strtol(spec, &endptr, 0);
4423  if (!*endptr)
4424  return stream_id == st->id;
4425  } else if (*spec == 'm' && *(spec + 1) == ':') {
4427  char *key, *val;
4428  int ret;
4429 
4430  spec += 2;
4431  val = strchr(spec, ':');
4432 
4433  key = val ? av_strndup(spec, val - spec) : av_strdup(spec);
4434  if (!key)
4435  return AVERROR(ENOMEM);
4436 
4437  tag = av_dict_get(st->metadata, key, NULL, 0);
4438  if (tag) {
4439  if (!val || !strcmp(tag->value, val + 1))
4440  ret = 1;
4441  else
4442  ret = 0;
4443  } else
4444  ret = 0;
4445 
4446  av_freep(&key);
4447  return ret;
4448  } else if (*spec == 'u') {
4449  AVCodecContext *avctx = st->codec;
4450  int val;
4451  switch (avctx->codec_type) {
4452  case AVMEDIA_TYPE_AUDIO:
4453  val = avctx->sample_rate && avctx->channels;
4454  if (avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
4455  return 0;
4456  break;
4457  case AVMEDIA_TYPE_VIDEO:
4458  val = avctx->width && avctx->height;
4459  if (avctx->pix_fmt == AV_PIX_FMT_NONE)
4460  return 0;
4461  break;
4462  case AVMEDIA_TYPE_UNKNOWN:
4463  val = 0;
4464  break;
4465  default:
4466  val = 1;
4467  break;
4468  }
4469  return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
4470  } else if (!*spec) /* empty specifier, matches everything */
4471  return 1;
4472 
4473  av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4474  return AVERROR(EINVAL);
4475 }
4476 
4478 {
4479  static const uint8_t avci100_1080p_extradata[] = {
4480  // SPS
4481  0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4482  0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4483  0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4484  0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4485  0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4486  0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4487  0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4488  0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4489  0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4490  // PPS
4491  0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4492  0xd0
4493  };
4494  static const uint8_t avci100_1080i_extradata[] = {
4495  // SPS
4496  0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4497  0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4498  0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4499  0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4500  0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4501  0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4502  0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4503  0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4504  0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4505  0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4506  0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x20,
4507  // PPS
4508  0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4509  0xd0
4510  };
4511  static const uint8_t avci50_1080p_extradata[] = {
4512  // SPS
4513  0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4514  0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4515  0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4516  0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6f, 0x37,
4517  0xcd, 0xf9, 0xbf, 0x81, 0x6b, 0xf3, 0x7c, 0xde,
4518  0x6e, 0x6c, 0xd3, 0x3c, 0x05, 0xa0, 0x22, 0x7e,
4519  0x5f, 0xfc, 0x00, 0x0c, 0x00, 0x13, 0x8c, 0x04,
4520  0x04, 0x05, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00,
4521  0x00, 0x03, 0x00, 0x32, 0x84, 0x00, 0x00, 0x00,
4522  // PPS
4523  0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4524  0x11
4525  };
4526  static const uint8_t avci50_1080i_extradata[] = {
4527  // SPS
4528  0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4529  0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4530  0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4531  0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4532  0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4533  0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4534  0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4535  0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4536  0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4537  0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4538  0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4539  // PPS
4540  0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4541  0x11
4542  };
4543  static const uint8_t avci100_720p_extradata[] = {
4544  // SPS
4545  0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4546  0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4547  0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4548  0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4549  0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4550  0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4551  0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4552  0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4553  0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4554  0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4555  // PPS
4556  0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4557  0x11
4558  };
4559  static const uint8_t avci50_720p_extradata[] = {
4560  // SPS
4561  0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x20,
4562  0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4563  0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4564  0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6f, 0x37,
4565  0xcd, 0xf9, 0xbf, 0x81, 0x6b, 0xf3, 0x7c, 0xde,
4566  0x6e, 0x6c, 0xd3, 0x3c, 0x0f, 0x01, 0x6e, 0xff,
4567  0xc0, 0x00, 0xc0, 0x01, 0x38, 0xc0, 0x40, 0x40,
4568  0x50, 0x00, 0x00, 0x03, 0x00, 0x10, 0x00, 0x00,
4569  0x06, 0x48, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
4570  // PPS
4571  0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4572  0x11
4573  };
4574 
4575  const uint8_t *data = NULL;
4576  int size = 0;
4577 
4578  if (st->codec->width == 1920) {
4579  if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4580  data = avci100_1080p_extradata;
4581  size = sizeof(avci100_1080p_extradata);
4582  } else {
4583  data = avci100_1080i_extradata;
4584  size = sizeof(avci100_1080i_extradata);
4585  }
4586  } else if (st->codec->width == 1440) {
4587  if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4588  data = avci50_1080p_extradata;
4589  size = sizeof(avci50_1080p_extradata);
4590  } else {
4591  data = avci50_1080i_extradata;
4592  size = sizeof(avci50_1080i_extradata);
4593  }
4594  } else if (st->codec->width == 1280) {
4595  data = avci100_720p_extradata;
4596  size = sizeof(avci100_720p_extradata);
4597  } else if (st->codec->width == 960) {
4598  data = avci50_720p_extradata;
4599  size = sizeof(avci50_720p_extradata);
4600  }
4601 
4602  if (!size)
4603  return 0;
4604 
4605  av_freep(&st->codec->extradata);
4606  if (ff_alloc_extradata(st->codec, size))
4607  return AVERROR(ENOMEM);
4608  memcpy(st->codec->extradata, data, size);
4609 
4610  return 0;
4611 }
4612 
4614  int *size)
4615 {
4616  int i;
4617 
4618  for (i = 0; i < st->nb_side_data; i++) {
4619  if (st->side_data[i].type == type) {
4620  if (size)
4621  *size = st->side_data[i].size;
4622  return st->side_data[i].data;
4623  }
4624  }
4625  return NULL;
4626 }
4627 
4629  int size)
4630 {
4631  AVPacketSideData *sd, *tmp;
4632  int i;
4633  uint8_t *data = av_malloc(size);
4634 
4635  if (!data)
4636  return NULL;
4637 
4638  for (i = 0; i < st->nb_side_data; i++) {
4639  sd = &st->side_data[i];
4640 
4641  if (sd->type == type) {
4642  av_freep(&sd->data);
4643  sd->data = data;
4644  sd->size = size;
4645  return sd->data;
4646  }
4647  }
4648 
4649  tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
4650  if (!tmp) {
4651  av_freep(&data);
4652  return NULL;
4653  }
4654 
4655  st->side_data = tmp;
4656  st->nb_side_data++;
4657 
4658  sd = &st->side_data[st->nb_side_data - 1];
4659  sd->type = type;
4660  sd->data = data;
4661  sd->size = size;
4662  return data;
4663 }
4664 
4665 int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args)
4666 {
4668  AVBitStreamFilterContext **dest = &st->internal->bsfc;
4669  while (*dest && (*dest)->next)
4670  dest = &(*dest)->next;
4671 
4672  if (!(bsfc = av_bitstream_filter_init(name))) {
4673  av_log(NULL, AV_LOG_ERROR, "Unknown bitstream filter '%s'\n", name);
4674  return AVERROR(EINVAL);
4675  }
4676  if (args && !(bsfc->args = av_strdup(args))) {
4678  return AVERROR(ENOMEM);
4679  }
4681  "Automatically inserted bitstream filter '%s'; args='%s'\n",
4682  name, args ? args : "");
4683  *dest = bsfc;
4684  return 1;
4685 }
4686 
4689 {
4690  int ret = 0;
4691  while (bsfc) {
4692  AVPacket new_pkt = *pkt;
4693  int a = av_bitstream_filter_filter(bsfc, codec, NULL,
4694  &new_pkt.data, &new_pkt.size,
4695  pkt->data, pkt->size,
4696  pkt->flags & AV_PKT_FLAG_KEY);
4697  if(a == 0 && new_pkt.data != pkt->data) {
4698  uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
4699  if (t) {
4700  memcpy(t, new_pkt.data, new_pkt.size);
4701  memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
4702  new_pkt.data = t;
4703  new_pkt.buf = NULL;
4704  a = 1;
4705  } else {
4706  a = AVERROR(ENOMEM);
4707  }
4708  }
4709  if (a > 0) {
4710  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
4712  if (new_pkt.buf) {
4713  pkt->side_data = NULL;
4714  pkt->side_data_elems = 0;
4715  av_packet_unref(pkt);
4716  } else {
4717  av_freep(&new_pkt.data);
4718  a = AVERROR(ENOMEM);
4719  }
4720  }
4721  if (a < 0) {
4722  av_log(codec, AV_LOG_ERROR,
4723  "Failed to open bitstream filter %s for stream %d with codec %s",
4724  bsfc->filter->name, pkt->stream_index,
4725  codec->codec ? codec->codec->name : "copy");
4726  ret = a;
4727  break;
4728  }
4729  *pkt = new_pkt;
4730 
4731  bsfc = bsfc->next;
4732  }
4733  return ret;
4734 }
4735 
4736 void ff_format_io_close(AVFormatContext *s, AVIOContext **pb)
4737 {
4738  if (*pb)
4739  s->io_close(s, *pb);
4740  *pb = NULL;
4741 }
4742 
4743 int ff_parse_creation_time_metadata(AVFormatContext *s, int64_t *timestamp, int return_seconds)
4744 {
4745  AVDictionaryEntry *entry;
4746  int64_t parsed_timestamp;
4747  int ret;
4748  if ((entry = av_dict_get(s->metadata, "creation_time", NULL, 0))) {
4749  if ((ret = av_parse_time(&parsed_timestamp, entry->value, 0)) >= 0) {
4750  *timestamp = return_seconds ? parsed_timestamp / 1000000 : parsed_timestamp;
4751  return 1;
4752  } else {
4753  av_log(s, AV_LOG_WARNING, "Failed to parse creation_time %s\n", entry->value);
4754  return ret;
4755  }
4756  }
4757  return 0;
4758 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1517
static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, AVPacketList **plast_pktl, int ref)
Definition: utils.c:372
unsigned int max_index_size
Maximum amount of memory in bytes to use for the index of each stream.
Definition: avformat.h:1498
void av_url_split(char *proto, int proto_size, char *authorization, int authorization_size, char *hostname, int hostname_size, int *port_ptr, char *path, int path_size, const char *url)
Split a URL string into components.
Definition: utils.c:4029
#define AVSEEK_FLAG_BACKWARD
Definition: avformat.h:2348
int64_t probesize
Maximum size of the data read from input for determining the input container format.
Definition: avformat.h:1454
static void free_stream(AVStream **pst)
Definition: utils.c:3697
int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val)
Definition: opt.c:903
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1053
#define NULL
Definition: coverity.c:32
AVChapter * avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
Add a new chapter.
Definition: utils.c:3911
const struct AVCodec * codec
Definition: avcodec.h:1541
full parsing and interpolation of timestamps for frames not starting on a packet boundary ...
Definition: avformat.h:809
AVRational framerate
Definition: avcodec.h:3212
const char const char void * val
Definition: avisynth_c.h:634
#define AV_CODEC_PROP_INTRA_ONLY
Codec uses only intra compression.
Definition: avcodec.h:595
static void fill_all_stream_timings(AVFormatContext *ic)
Definition: utils.c:2419
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:491
int64_t duration_gcd
Definition: avformat.h:1017
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static void update_initial_durations(AVFormatContext *s, AVStream *st, int stream_index, int duration)
Definition: utils.c:976
AVProbeData probe_data
Definition: avformat.h:1076
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:287
#define AV_PTS_WRAP_ADD_OFFSET
add the format specific offset on wrap detection
Definition: avformat.h:867
static int shift(int a, int b)
Definition: sonic.c:82
AVPacketSideDataType
Definition: avcodec.h:1249
enum AVCodecID id
Definition: internal.h:43
static av_const int av_isdigit(int c)
Locale-independent conversion of ASCII isdigit.
Definition: avstring.h:206
int av_demuxer_open(AVFormatContext *ic)
Definition: utils.c:320
char * recommended_encoder_configuration
String containing pairs of keys and values describing the recommended encoder configuration.
Definition: avformat.h:1208
struct AVPacketList * raw_packet_buffer
Raw packets from the demuxer, prior to parsing and decoding.
Definition: internal.h:88
This structure describes decoded (raw) audio or video data.
Definition: frame.h:181
int64_t(* read_timestamp)(struct AVFormatContext *s, int stream_index, int64_t *pos, int64_t pos_limit)
Get the next timestamp in stream[stream_index].time_base units.
Definition: avformat.h:760
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1566
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
void(* ff_parse_key_val_cb)(void *context, const char *key, in