[FFmpeg-devel] [PATCH 2/2] Replace deprecated AV_TIME_BASE{_Q, } with av_get_time_base{_q}()

Derek Buitenhuis derek.buitenhuis at gmail.com
Mon Dec 30 00:35:38 CET 2013


Signed-off-by: Derek Buitenhuis <derek.buitenhuis at gmail.com>
---
 doc/examples/filtering_video.c   |   4 +-
 ffmpeg.c                         | 123 ++++++++++++++++++++-------------------
 ffmpeg.h                         |  14 ++---
 ffmpeg_opt.c                     |  10 ++--
 ffplay.c                         |  14 ++---
 ffprobe.c                        |  51 ++++++++--------
 ffserver.c                       |   8 +--
 libavcodec/avcodec.h             |   2 +-
 libavcodec/libzvbi-teletextdec.c |   4 +-
 libavcodec/utils.c               |   2 +-
 libavcodec/xsubdec.c             |   2 +-
 libavdevice/caca.c               |   2 +-
 libavdevice/lavfi.c              |   2 +-
 libavdevice/v4l2.c               |   6 +-
 libavfilter/aeval.c              |   2 +-
 libavfilter/af_afade.c           |   4 +-
 libavfilter/af_ladspa.c          |   2 +-
 libavfilter/asrc_sine.c          |   2 +-
 libavfilter/avf_concat.c         |   2 +-
 libavfilter/avfilter.c           |   4 +-
 libavfilter/avfilter.h           |   2 +-
 libavfilter/f_interleave.c       |   8 +--
 libavfilter/f_sendcmd.c          |   2 +-
 libavfilter/f_settb.c            |   4 +-
 libavfilter/framesync.c          |   4 +-
 libavfilter/trim.c               |   6 +-
 libavfilter/vf_fade.c            |  16 ++---
 libavfilter/vf_fps.c             |   6 +-
 libavfilter/vf_hue.c             |   2 +-
 libavfilter/vf_vignette.c        |   2 +-
 libavfilter/vsrc_mptestsrc.c     |   2 +-
 libavfilter/vsrc_testsrc.c       |   2 +-
 libavformat/avformat.h           |  12 ++--
 libavformat/avidec.c             |  20 +++----
 libavformat/avio.h               |   4 +-
 libavformat/concatdec.c          |   6 +-
 libavformat/dxa.c                |   2 +-
 libavformat/ffmetadec.c          |   2 +-
 libavformat/flvdec.c             |   4 +-
 libavformat/flvenc.c             |   2 +-
 libavformat/hdsenc.c             |   2 +-
 libavformat/hls.c                |  14 ++---
 libavformat/hlsenc.c             |   4 +-
 libavformat/hlsproto.c           |   4 +-
 libavformat/librtmp.c            |   2 +-
 libavformat/matroskadec.c        |   4 +-
 libavformat/mov.c                |   8 +--
 libavformat/movenc.c             |   2 +-
 libavformat/mpeg.c               |   6 +-
 libavformat/mpegenc.c            |  10 ++--
 libavformat/mpegtsenc.c          |   6 +-
 libavformat/mux.c                |  16 ++---
 libavformat/nutdec.c             |   6 +-
 libavformat/oggenc.c             |   8 +--
 libavformat/options_table.h      |   2 +-
 libavformat/rmdec.c              |   2 +-
 libavformat/rtpdec.c             |   4 +-
 libavformat/rtpenc.c             |   2 +-
 libavformat/rtsp.h               |   2 +-
 libavformat/rtspdec.c            |   8 +--
 libavformat/sbgdec.c             |  22 +++----
 libavformat/seek-test.c          |   6 +-
 libavformat/seek.c               |   4 +-
 libavformat/segment.c            |   9 +--
 libavformat/smoothstreamingenc.c |   2 +-
 libavformat/utils.c              |  56 +++++++++---------
 libavutil/utils.c                |   4 +-
 tools/aviocat.c                  |   2 +-
 tools/ismindex.c                 |   2 +-
 69 files changed, 297 insertions(+), 290 deletions(-)

diff --git a/doc/examples/filtering_video.c b/doc/examples/filtering_video.c
index 790c641..c47f3a3 100644
--- a/doc/examples/filtering_video.c
+++ b/doc/examples/filtering_video.c
@@ -161,9 +161,9 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
     if (frame->pts != AV_NOPTS_VALUE) {
         if (last_pts != AV_NOPTS_VALUE) {
             /* sleep roughly the right amount of time;
-             * usleep is in microseconds, just like AV_TIME_BASE. */
+             * usleep is in microseconds, just like av_get_time_base(). */
             delay = av_rescale_q(frame->pts - last_pts,
-                                 time_base, AV_TIME_BASE_Q);
+                                 time_base, av_get_time_base_q());
             if (delay > 0 && delay < 1000000)
                 usleep(delay);
         }
diff --git a/ffmpeg.c b/ffmpeg.c
index 5ccbf10..8474cb5 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -241,9 +241,9 @@ static void sub2video_update(InputStream *ist, AVSubtitle *sub)
         return;
     if (sub) {
         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
+                                 av_get_time_base_q(), ist->st->time_base);
         end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
+                                 av_get_time_base_q(), ist->st->time_base);
         num_rects = sub->num_rects;
     } else {
         pts       = ist->sub2video.end_pts;
@@ -657,7 +657,7 @@ static void close_output_stream(OutputStream *ost)
 
     ost->finished = 1;
     if (of->shortest) {
-        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
+        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, av_get_time_base_q());
         of->recording_time = FFMIN(of->recording_time, end);
     }
 }
@@ -668,7 +668,7 @@ static int check_recording_time(OutputStream *ost)
 
     if (of->recording_time != INT64_MAX &&
         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
-                      AV_TIME_BASE_Q) >= 0) {
+                      av_get_time_base_q()) >= 0) {
         close_output_stream(ost);
         return 0;
     }
@@ -760,13 +760,13 @@ static void do_subtitle_out(AVFormatContext *s,
     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
         pts -= output_files[ost->file_index]->start_time;
     for (i = 0; i < nb; i++) {
-        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
+        ost->sync_opts = av_rescale_q(pts, av_get_time_base_q(), enc->time_base);
         if (!check_recording_time(ost))
             return;
 
         sub->pts = pts;
         // start_display_time is required to be 0
-        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
+        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, av_get_time_base_q());
         sub->end_display_time  -= sub->start_display_time;
         sub->start_display_time = 0;
         if (i == 1)
@@ -781,7 +781,7 @@ static void do_subtitle_out(AVFormatContext *s,
         av_init_packet(&pkt);
         pkt.data = subtitle_out;
         pkt.size = subtitle_out_size;
-        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.pts  = av_rescale_q(sub->pts, av_get_time_base_q(), ost->st->time_base);
         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
             /* XXX: the pts correction is handled here. Maybe handling
@@ -1100,7 +1100,7 @@ static int reap_filters(void)
                                                 ost->filter->filter->inputs[0]->time_base,
                                                 ost->st->codec->time_base) -
                                     av_rescale_q(start_time,
-                                                AV_TIME_BASE_Q,
+                                                av_get_time_base_q(),
                                                 ost->st->codec->time_base);
             }
             //if (ost->source_index >= 0)
@@ -1241,11 +1241,11 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
         /* compute min output value */
         if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
             pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
-                                          ost->st->time_base, AV_TIME_BASE_Q));
+                                          ost->st->time_base, av_get_time_base_q()));
     }
 
-    secs = pts / AV_TIME_BASE;
-    us = pts % AV_TIME_BASE;
+    secs = pts / av_get_time_base();
+    us = pts % av_get_time_base();
     mins = secs / 60;
     secs %= 60;
     hours = mins / 60;
@@ -1259,7 +1259,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
                                  "size=%8.0fkB time=", total_size / 1024.0);
     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
              "%02d:%02d:%02d.%02d ", hours, mins, secs,
-             (100 * us) / AV_TIME_BASE);
+             (100 * us) / av_get_time_base());
     if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                               "bitrate=N/A");
     else             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
@@ -1414,8 +1414,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     OutputFile *of = output_files[ost->file_index];
     InputFile   *f = input_files [ist->file_index];
     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
-    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
-    int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
+    int64_t ost_tb_start_time = av_rescale_q(start_time, av_get_time_base_q(), ost->st->time_base);
+    int64_t ist_tb_start_time = av_rescale_q(start_time, av_get_time_base_q(), ist->st->time_base);
     AVPicture pict;
     AVPacket opkt;
 
@@ -1467,7 +1467,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         opkt.pts = AV_NOPTS_VALUE;
 
     if (pkt->dts == AV_NOPTS_VALUE)
-        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
+        opkt.dts = av_rescale_q(ist->dts, av_get_time_base_q(), ost->st->time_base);
     else
         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
     opkt.dts -= ost_tb_start_time;
@@ -1576,9 +1576,9 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 #if 1
     /* increment next_dts to use for the case where the input stream does not
        have timestamps or there are multiple frames in the packet */
-    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+    ist->next_pts += ((int64_t)av_get_time_base() * decoded_frame->nb_samples) /
                      avctx->sample_rate;
-    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+    ist->next_dts += ((int64_t)av_get_time_base() * decoded_frame->nb_samples) /
                      avctx->sample_rate;
 #endif
 
@@ -1636,7 +1636,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     /* if the decoder provides a pts, use it instead of the last packet pts.
        the decoder could be delaying output by a packet or more. */
     if (decoded_frame->pts != AV_NOPTS_VALUE) {
-        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
+        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, av_get_time_base_q());
         decoded_frame_tb   = avctx->time_base;
     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
         decoded_frame->pts = decoded_frame->pkt_pts;
@@ -1648,7 +1648,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         decoded_frame_tb   = ist->st->time_base;
     }else {
         decoded_frame->pts = ist->dts;
-        decoded_frame_tb   = AV_TIME_BASE_Q;
+        decoded_frame_tb   = av_get_time_base_q();
     }
     if (decoded_frame->pts != AV_NOPTS_VALUE)
         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
@@ -1688,7 +1688,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
     decoded_frame = ist->decoded_frame;
-    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+    pkt->dts  = av_rescale_q(ist->dts, av_get_time_base_q(), ist->st->time_base);
 
     update_benchmark(NULL);
     ret = avcodec_decode_video2(ist->st->codec,
@@ -1722,7 +1722,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 
     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
     if(best_effort_timestamp != AV_NOPTS_VALUE)
-        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
+        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, av_get_time_base_q());
 
     if (debug_ts) {
         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
@@ -1809,7 +1809,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
         int end = 1;
         if (ist->prev_sub.got_output) {
             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
-                             1000, AV_TIME_BASE);
+                             1000, av_get_time_base());
             if (end < ist->prev_sub.subtitle.end_display_time) {
                 av_log(ist->st->codec, AV_LOG_DEBUG,
                        "Subtitle duration reduced from %d to %d%s\n",
@@ -1855,10 +1855,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 
     AVPacket avpkt;
     if (!ist->saw_first_ts) {
-        ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+        ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * av_get_time_base() / av_q2d(ist->st->avg_frame_rate) : 0;
         ist->pts = 0;
         if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
-            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
+            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, av_get_time_base_q());
             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
         }
         ist->saw_first_ts = 1;
@@ -1880,7 +1880,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
     }
 
     if (pkt->dts != AV_NOPTS_VALUE) {
-        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, av_get_time_base_q());
         if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
             ist->next_pts = ist->pts = ist->dts;
     }
@@ -1906,10 +1906,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
         case AVMEDIA_TYPE_VIDEO:
             ret = decode_video    (ist, &avpkt, &got_output);
             if (avpkt.duration) {
-                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
+                duration = av_rescale_q(avpkt.duration, ist->st->time_base, av_get_time_base_q());
             } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
                 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
-                duration = ((int64_t)AV_TIME_BASE *
+                duration = ((int64_t)av_get_time_base() *
                                 ist->st->codec->time_base.num * ticks) /
                                 ist->st->codec->time_base.den;
             } else
@@ -1953,20 +1953,20 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
         ist->dts = ist->next_dts;
         switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
-            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
+            ist->next_dts += ((int64_t)av_get_time_base() * ist->st->codec->frame_size) /
                              ist->st->codec->sample_rate;
             break;
         case AVMEDIA_TYPE_VIDEO:
             if (ist->framerate.num) {
                 // TODO: Remove work-around for c99-to-c89 issue 7
-                AVRational time_base_q = AV_TIME_BASE_Q;
+                AVRational time_base_q = av_get_time_base_q();
                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
             } else if (pkt->duration) {
-                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
+                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, av_get_time_base_q());
             } else if(ist->st->codec->time_base.num != 0) {
                 int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
-                ist->next_dts += ((int64_t)AV_TIME_BASE *
+                ist->next_dts += ((int64_t)av_get_time_base() *
                                   ist->st->codec->time_base.num * ticks) /
                                   ist->st->codec->time_base.den;
             }
@@ -2156,7 +2156,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
                 exit_program(1);
             }
             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
-            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+            t = av_rescale_q(t, av_get_time_base_q(), avctx->time_base);
 
             for (j = 0; j < avf->nb_chapters; j++) {
                 AVChapter *c = avf->chapters[j];
@@ -2169,7 +2169,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
 
             t = parse_time_or_die("force_key_frames", p, 1);
             av_assert1(index < size);
-            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+            pts[index++] = av_rescale_q(t, av_get_time_base_q(), avctx->time_base);
 
         }
 
@@ -2469,7 +2469,7 @@ static int transcode_init(void)
                 }
                 for (j = 0; j < ost->forced_kf_count; j++)
                     ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
-                                                         AV_TIME_BASE_Q,
+                                                         av_get_time_base_q(),
                                                          codec->time_base);
 
                 codec->width  = ost->filter->filter->inputs[0]->w;
@@ -2769,7 +2769,7 @@ static OutputStream *choose_output(void)
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
         int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
-                                    AV_TIME_BASE_Q);
+                                    av_get_time_base_q());
         if (!ost->unavailable && !ost->finished && opts < opts_min) {
             opts_min = opts;
             ost_min  = ost;
@@ -2991,7 +2991,7 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
         int i;
         for (i = 0; i < f->nb_streams; i++) {
             InputStream *ist = input_streams[f->ist_index + i];
-            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
+            int64_t pts = av_rescale(ist->dts, 1000000, av_get_time_base());
             int64_t now = av_gettime() - ist->start;
             if (pts > now)
                 return AVERROR(EAGAIN);
@@ -3036,6 +3036,7 @@ static int process_input(int file_index)
     AVFormatContext *is;
     InputStream *ist;
     AVPacket pkt;
+    AVRational tb = av_get_time_base_q();
     int ret, i, j;
 
     is  = ifile->ctx;
@@ -3092,12 +3093,12 @@ static int process_input(int file_index)
         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
-               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
-               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
+               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &tb),
+               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &tb),
                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                av_ts2str(input_files[ist->file_index]->ts_offset),
-               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, &tb));
     }
 
     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
@@ -3113,7 +3114,7 @@ static int process_input(int file_index)
                 AVStream *st = is->streams[i];
                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                     continue;
-                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
+                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, av_get_time_base_q()));
             }
             if (new_start_time > is->start_time) {
                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
@@ -3121,7 +3122,7 @@ static int process_input(int file_index)
             }
         }
 
-        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
+        stime = av_rescale_q(is->start_time, av_get_time_base_q(), ist->st->time_base);
         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
         ist->wrap_correction_done = 1;
 
@@ -3136,9 +3137,9 @@ static int process_input(int file_index)
     }
 
     if (pkt.dts != AV_NOPTS_VALUE)
-        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+        pkt.dts += av_rescale_q(ifile->ts_offset, av_get_time_base_q(), ist->st->time_base);
     if (pkt.pts != AV_NOPTS_VALUE)
-        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+        pkt.pts += av_rescale_q(ifile->ts_offset, av_get_time_base_q(), ist->st->time_base);
 
     if (pkt.pts != AV_NOPTS_VALUE)
         pkt.pts *= ist->ts_scale;
@@ -3147,50 +3148,50 @@ static int process_input(int file_index)
 
     if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
-        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, av_get_time_base_q());
         int64_t delta   = pkt_dts - ifile->last_ts;
-        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
-            (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
+        if(delta < -1LL*dts_delta_threshold*av_get_time_base() ||
+            (delta > 1LL*dts_delta_threshold*av_get_time_base() &&
                 ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)){
             ifile->ts_offset -= delta;
             av_log(NULL, AV_LOG_DEBUG,
                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                    delta, ifile->ts_offset);
-            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+            pkt.dts -= av_rescale_q(delta, av_get_time_base_q(), ist->st->time_base);
             if (pkt.pts != AV_NOPTS_VALUE)
-                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+                pkt.pts -= av_rescale_q(delta, av_get_time_base_q(), ist->st->time_base);
         }
     }
 
     if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
         !copy_ts) {
-        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, av_get_time_base_q());
         int64_t delta   = pkt_dts - ist->next_dts;
         if (is->iformat->flags & AVFMT_TS_DISCONT) {
-        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
-            (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
+        if(delta < -1LL*dts_delta_threshold*av_get_time_base() ||
+            (delta > 1LL*dts_delta_threshold*av_get_time_base() &&
                 ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
-            pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)){
+            pkt_dts + av_get_time_base()/10 < FFMAX(ist->pts, ist->dts)){
             ifile->ts_offset -= delta;
             av_log(NULL, AV_LOG_DEBUG,
                    "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                    delta, ifile->ts_offset);
-            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+            pkt.dts -= av_rescale_q(delta, av_get_time_base_q(), ist->st->time_base);
             if (pkt.pts != AV_NOPTS_VALUE)
-                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+                pkt.pts -= av_rescale_q(delta, av_get_time_base_q(), ist->st->time_base);
         }
         } else {
-            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
-                (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
+            if ( delta < -1LL*dts_error_threshold*av_get_time_base() ||
+                (delta > 1LL*dts_error_threshold*av_get_time_base() && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
                ) {
                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                 pkt.dts = AV_NOPTS_VALUE;
             }
             if (pkt.pts != AV_NOPTS_VALUE){
-                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
+                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, av_get_time_base_q());
                 delta   = pkt_pts - ist->next_dts;
-                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
-                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
+                if ( delta < -1LL*dts_error_threshold*av_get_time_base() ||
+                    (delta > 1LL*dts_error_threshold*av_get_time_base() && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
                    ) {
                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                     pkt.pts = AV_NOPTS_VALUE;
@@ -3200,7 +3201,7 @@ static int process_input(int file_index)
     }
 
     if (pkt.dts != AV_NOPTS_VALUE)
-        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, av_get_time_base_q());
 
     if (debug_ts) {
         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
@@ -3208,7 +3209,7 @@ static int process_input(int file_index)
                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                av_ts2str(input_files[ist->file_index]->ts_offset),
-               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, &tb));
     }
 
     sub2video_heartbeat(ist, pkt.pts);
diff --git a/ffmpeg.h b/ffmpeg.h
index 433baf8..4134a35 100644
--- a/ffmpeg.h
+++ b/ffmpeg.h
@@ -242,12 +242,12 @@ typedef struct InputStream {
 
     int64_t       start;     /* time when read started */
     /* predicted dts of the next packet read for this stream or (when there are
-     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
+     * several frames in a packet) of the next frame in current packet (in internal time base units) */
     int64_t       next_dts;
-    int64_t       dts;       ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
+    int64_t       dts;       ///< dts of the last packet read for this stream (in internal time base units)
 
-    int64_t       next_pts;  ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
-    int64_t       pts;       ///< current pts of the decoded frame  (in AV_TIME_BASE units)
+    int64_t       next_pts;  ///< synthetic pts for the next decode frame (in internal time base units)
+    int64_t       pts;       ///< current pts of the decoded frame  (in internal time base units)
     int           wrap_correction_done;
 
     int64_t filter_in_rescale_delta_last;
@@ -315,7 +315,7 @@ typedef struct InputFile {
     int64_t input_ts_offset;
     int64_t ts_offset;
     int64_t last_ts;
-    int64_t start_time;   /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
+    int64_t start_time;   /* user-specified start time in internal time base units or AV_NOPTS_VALUE */
     int64_t recording_time;
     int nb_streams;       /* number of stream that ffmpeg is aware of; may be different
                              from ctx.nb_streams if new streams appear during av_read_frame() */
@@ -413,8 +413,8 @@ typedef struct OutputFile {
     AVFormatContext *ctx;
     AVDictionary *opts;
     int ost_index;       /* index of the first stream in output_streams */
-    int64_t recording_time;  ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
-    int64_t start_time;      ///< start time in microseconds == AV_TIME_BASE units
+    int64_t recording_time;  ///< desired length of the resulting file in microseconds == internal time base units
+    int64_t start_time;      ///< start time in microseconds == internal time base units
     uint64_t limit_filesize; /* filesize limit expressed in bytes */
 
     int shortest;
diff --git a/ffmpeg_opt.c b/ffmpeg_opt.c
index d267c6d..852a916 100644
--- a/ffmpeg_opt.c
+++ b/ffmpeg_opt.c
@@ -875,7 +875,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, timestamp, 0);
         if (ret < 0) {
             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
-                   filename, (double)timestamp / AV_TIME_BASE);
+                   filename, (double)timestamp / av_get_time_base());
         }
     }
 
@@ -1520,9 +1520,9 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
         AVChapter *in_ch = is->chapters[i], *out_ch;
         int64_t start_time = (ofile->start_time == AV_NOPTS_VALUE) ? 0 : ofile->start_time;
         int64_t ts_off   = av_rescale_q(start_time - ifile->ts_offset,
-                                       AV_TIME_BASE_Q, in_ch->time_base);
+                                       av_get_time_base_q(), in_ch->time_base);
         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
-                           av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
+                           av_rescale_q(ofile->recording_time, av_get_time_base_q(), in_ch->time_base);
 
 
         if (in_ch->end < ts_off)
@@ -1978,10 +1978,10 @@ loop_end:
 
     if (o->mux_preload) {
         uint8_t buf[64];
-        snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
+        snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*av_get_time_base()));
         av_dict_set(&of->opts, "preload", buf, 0);
     }
-    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
+    oc->max_delay = (int)(o->mux_max_delay * av_get_time_base());
 
     /* copy metadata */
     for (i = 0; i < o->nb_metadata_map; i++) {
diff --git a/ffplay.c b/ffplay.c
index 164872a..17651d4 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -2043,7 +2043,7 @@ static int subtitle_thread(void *arg)
                                  &got_subtitle, pkt);
         if (got_subtitle && sp->sub.format == 0) {
             if (sp->sub.pts != AV_NOPTS_VALUE)
-                pts = sp->sub.pts / (double)AV_TIME_BASE;
+                pts = sp->sub.pts / (double)av_get_time_base();
             sp->pts = pts;
             sp->serial = serial;
 
@@ -2785,7 +2785,7 @@ static int read_thread(void *arg)
         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
         if (ret < 0) {
             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
-                    is->filename, (double)timestamp / AV_TIME_BASE);
+                    is->filename, (double)timestamp / av_get_time_base());
         }
     }
 
@@ -2897,7 +2897,7 @@ static int read_thread(void *arg)
                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
                    set_clock(&is->extclk, NAN, 0);
                 } else {
-                   set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
+                   set_clock(&is->extclk, seek_target / (double)av_get_time_base(), 0);
                 }
             }
             is->seek_req = 0;
@@ -3247,11 +3247,11 @@ static void event_loop(VideoState *cur_stream)
                     } else {
                         pos = get_master_clock(cur_stream);
                         if (isnan(pos))
-                            pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
+                            pos = (double)cur_stream->seek_pos / av_get_time_base();
                         pos += incr;
-                        if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
-                            pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
-                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
+                        if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)av_get_time_base())
+                            pos = cur_stream->ic->start_time / (double)av_get_time_base();
+                        stream_seek(cur_stream, (int64_t)(pos * av_get_time_base()), (int64_t)(incr * av_get_time_base()), 0);
                     }
                 break;
             default:
diff --git a/ffprobe.c b/ffprobe.c
index 0374d37..372b4d9 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -83,7 +83,7 @@ static char *stream_specifier;
 
 typedef struct {
     int id;             ///< identifier
-    int64_t start, end; ///< start, end in second/AV_TIME_BASE units
+    int64_t start, end; ///< start, end in seconds or internal time base units
     int has_start, has_end;
     int start_is_offset, end_is_offset;
     int duration_frames;
@@ -626,14 +626,14 @@ static inline void writer_print_rational(WriterContext *wctx,
 }
 
 static void writer_print_time(WriterContext *wctx, const char *key,
-                              int64_t ts, const AVRational *time_base, int is_duration)
+                              int64_t ts, const AVRational time_base, int is_duration)
 {
     char buf[128];
 
     if ((!is_duration && ts == AV_NOPTS_VALUE) || (is_duration && ts == 0)) {
         writer_print_string(wctx, key, "N/A", PRINT_STRING_OPT);
     } else {
-        double d = ts * av_q2d(*time_base);
+        double d = ts * av_q2d(time_base);
         struct unit_value uv;
         uv.val.d = d;
         uv.unit = unit_second_str;
@@ -1665,13 +1665,13 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
     else   print_str_opt("codec_type", "unknown");
     print_int("stream_index",     pkt->stream_index);
     print_ts  ("pts",             pkt->pts);
-    print_time("pts_time",        pkt->pts, &st->time_base);
+    print_time("pts_time",        pkt->pts, st->time_base);
     print_ts  ("dts",             pkt->dts);
-    print_time("dts_time",        pkt->dts, &st->time_base);
+    print_time("dts_time",        pkt->dts, st->time_base);
     print_duration_ts("duration",        pkt->duration);
-    print_duration_time("duration_time", pkt->duration, &st->time_base);
+    print_duration_time("duration_time", pkt->duration, st->time_base);
     print_duration_ts("convergence_duration", pkt->convergence_duration);
-    print_duration_time("convergence_duration_time", pkt->convergence_duration, &st->time_base);
+    print_duration_time("convergence_duration_time", pkt->convergence_duration, st->time_base);
     print_val("size",             pkt->size, unit_byte_str);
     if (pkt->pos != -1) print_fmt    ("pos", "%"PRId64, pkt->pos);
     else                print_str_opt("pos", "N/A");
@@ -1695,7 +1695,7 @@ static void show_subtitle(WriterContext *w, AVSubtitle *sub, AVStream *stream,
 
     print_str ("media_type",         "subtitle");
     print_ts  ("pts",                 sub->pts);
-    print_time("pts_time",            sub->pts, &AV_TIME_BASE_Q);
+    print_time("pts_time",            sub->pts, av_get_time_base_q());
     print_int ("format",              sub->format);
     print_int ("start_display_time",  sub->start_display_time);
     print_int ("end_display_time",    sub->end_display_time);
@@ -1722,13 +1722,13 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
     else   print_str_opt("media_type", "unknown");
     print_int("key_frame",              frame->key_frame);
     print_ts  ("pkt_pts",               frame->pkt_pts);
-    print_time("pkt_pts_time",          frame->pkt_pts, &stream->time_base);
+    print_time("pkt_pts_time",          frame->pkt_pts, stream->time_base);
     print_ts  ("pkt_dts",               frame->pkt_dts);
-    print_time("pkt_dts_time",          frame->pkt_dts, &stream->time_base);
+    print_time("pkt_dts_time",          frame->pkt_dts, stream->time_base);
     print_ts  ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
-    print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), &stream->time_base);
+    print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), stream->time_base);
     print_duration_ts  ("pkt_duration",      av_frame_get_pkt_duration(frame));
-    print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), &stream->time_base);
+    print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), stream->time_base);
     if (av_frame_get_pkt_pos (frame) != -1) print_fmt    ("pkt_pos", "%"PRId64, av_frame_get_pkt_pos(frame));
     else                      print_str_opt("pkt_pos", "N/A");
     if (av_frame_get_pkt_size(frame) != -1) print_fmt    ("pkt_size", "%d", av_frame_get_pkt_size(frame));
@@ -1826,11 +1826,13 @@ static av_always_inline int process_frame(WriterContext *w,
 
 static void log_read_interval(const ReadInterval *interval, void *log_ctx, int log_level)
 {
+    AVRational tb = av_get_time_base_q();
+
     av_log(log_ctx, log_level, "id:%d", interval->id);
 
     if (interval->has_start) {
         av_log(log_ctx, log_level, " start:%s%s", interval->start_is_offset ? "+" : "",
-               av_ts2timestr(interval->start, &AV_TIME_BASE_Q));
+               av_ts2timestr(interval->start, &tb));
     } else {
         av_log(log_ctx, log_level, " start:N/A");
     }
@@ -1840,7 +1842,7 @@ static void log_read_interval(const ReadInterval *interval, void *log_ctx, int l
         if (interval->duration_frames)
             av_log(log_ctx, log_level, "#%"PRId64, interval->end);
         else
-            av_log(log_ctx, log_level, "%s", av_ts2timestr(interval->end, &AV_TIME_BASE_Q));
+            av_log(log_ctx, log_level, "%s", av_ts2timestr(interval->end, &tb));
     } else {
         av_log(log_ctx, log_level, " end:N/A");
     }
@@ -1852,6 +1854,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
                                  const ReadInterval *interval, int64_t *cur_ts)
 {
     AVPacket pkt, pkt1;
+    AVRational tb = av_get_time_base_q();
     AVFrame *frame = NULL;
     int ret = 0, i = 0, frame_count = 0;
     int64_t start = -INT64_MAX, end = interval->end;
@@ -1878,7 +1881,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
         }
 
         av_log(NULL, AV_LOG_VERBOSE, "Seeking to read interval start point %s\n",
-               av_ts2timestr(target, &AV_TIME_BASE_Q));
+               av_ts2timestr(target, &tb));
         if ((ret = avformat_seek_file(fmt_ctx, -1, -INT64_MAX, target, INT64_MAX, 0)) < 0) {
             av_log(NULL, AV_LOG_ERROR, "Could not seek to position %"PRId64": %s\n",
                    interval->start, av_err2str(ret));
@@ -1892,7 +1895,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
             AVRational tb = fmt_ctx->streams[pkt.stream_index]->time_base;
 
             if (pkt.pts != AV_NOPTS_VALUE)
-                *cur_ts = av_rescale_q(pkt.pts, tb, AV_TIME_BASE_Q);
+                *cur_ts = av_rescale_q(pkt.pts, tb, av_get_time_base_q());
 
             if (!has_start && *cur_ts != AV_NOPTS_VALUE) {
                 start = *cur_ts;
@@ -2090,9 +2093,9 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
     print_q("avg_frame_rate", stream->avg_frame_rate, '/');
     print_q("time_base",      stream->time_base,      '/');
     print_ts  ("start_pts",   stream->start_time);
-    print_time("start_time",  stream->start_time, &stream->time_base);
+    print_time("start_time",  stream->start_time, stream->time_base);
     print_ts  ("duration_ts", stream->duration);
-    print_time("duration",    stream->duration, &stream->time_base);
+    print_time("duration",    stream->duration, stream->time_base);
     if (dec_ctx->bit_rate > 0) print_val    ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
     else                       print_str_opt("bit_rate", "N/A");
     if (stream->nb_frames) print_fmt    ("nb_frames", "%"PRId64, stream->nb_frames);
@@ -2163,9 +2166,9 @@ static int show_program(WriterContext *w, AVFormatContext *fmt_ctx, AVProgram *p
     print_int("pmt_pid", program->pmt_pid);
     print_int("pcr_pid", program->pcr_pid);
     print_ts("start_pts", program->start_time);
-    print_time("start_time", program->start_time, &AV_TIME_BASE_Q);
+    print_time("start_time", program->start_time, av_get_time_base_q());
     print_ts("end_pts", program->end_time);
-    print_time("end_time", program->end_time, &AV_TIME_BASE_Q);
+    print_time("end_time", program->end_time, av_get_time_base_q());
     if (do_show_program_tags)
         ret = show_tags(w, program->metadata, SECTION_ID_PROGRAM_TAGS);
     if (ret < 0)
@@ -2215,9 +2218,9 @@ static int show_chapters(WriterContext *w, AVFormatContext *fmt_ctx)
         print_int("id", chapter->id);
         print_q  ("time_base", chapter->time_base, '/');
         print_int("start", chapter->start);
-        print_time("start_time", chapter->start, &chapter->time_base);
+        print_time("start_time", chapter->start, chapter->time_base);
         print_int("end", chapter->end);
-        print_time("end_time", chapter->end, &chapter->time_base);
+        print_time("end_time", chapter->end, chapter->time_base);
         if (do_show_chapter_tags)
             ret = show_tags(w, chapter->metadata, SECTION_ID_CHAPTER_TAGS);
         writer_print_section_footer(w);
@@ -2242,8 +2245,8 @@ static int show_format(WriterContext *w, AVFormatContext *fmt_ctx)
         if (fmt_ctx->iformat->long_name) print_str    ("format_long_name", fmt_ctx->iformat->long_name);
         else                             print_str_opt("format_long_name", "unknown");
     }
-    print_time("start_time",      fmt_ctx->start_time, &AV_TIME_BASE_Q);
-    print_time("duration",        fmt_ctx->duration,   &AV_TIME_BASE_Q);
+    print_time("start_time",      fmt_ctx->start_time, av_get_time_base_q());
+    print_time("duration",        fmt_ctx->duration,   av_get_time_base_q());
     if (size >= 0) print_val    ("size", size, unit_byte_str);
     else           print_str_opt("size", "N/A");
     if (fmt_ctx->bit_rate > 0) print_val    ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str);
diff --git a/ffserver.c b/ffserver.c
index e3053d5..343ed6d 100644
--- a/ffserver.c
+++ b/ffserver.c
@@ -2312,7 +2312,7 @@ static int http_prepare_data(HTTPContext *c)
          * Default value from FFmpeg
          * Try to set it use configuration option
          */
-        c->fmt_ctx.max_delay = (int)(0.7*AV_TIME_BASE);
+        c->fmt_ctx.max_delay = (int)(0.7*av_get_time_base());
 
         if ((ret = avformat_write_header(&c->fmt_ctx, NULL)) < 0) {
             http_log("Error writing output header for stream '%s': %s\n",
@@ -2369,7 +2369,7 @@ static int http_prepare_data(HTTPContext *c)
                 int source_index = pkt.stream_index;
                 /* update first pts if needed */
                 if (c->first_pts == AV_NOPTS_VALUE) {
-                    c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
+                    c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, av_get_time_base_q());
                     c->start_time = cur_time;
                 }
                 /* send it to the appropriate stream */
@@ -2407,9 +2407,9 @@ static int http_prepare_data(HTTPContext *c)
                        connection). XXX: need more abstract handling */
                     if (c->is_packetized) {
                         /* compute send time and duration */
-                        c->cur_pts = av_rescale_q(pkt.dts, ist->time_base, AV_TIME_BASE_Q);
+                        c->cur_pts = av_rescale_q(pkt.dts, ist->time_base, av_get_time_base_q());
                         c->cur_pts -= c->first_pts;
-                        c->cur_frame_duration = av_rescale_q(pkt.duration, ist->time_base, AV_TIME_BASE_Q);
+                        c->cur_frame_duration = av_rescale_q(pkt.duration, ist->time_base, av_get_time_base_q());
                         /* find RTP context */
                         c->packet_stream_index = pkt.stream_index;
                         ctx = c->rtp_ctx[c->packet_stream_index];
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index c614829..5d4bc02 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -3284,7 +3284,7 @@ typedef struct AVSubtitle {
     uint32_t end_display_time; /* relative to packet pts, in ms */
     unsigned num_rects;
     AVSubtitleRect **rects;
-    int64_t pts;    ///< Same as packet pts, in AV_TIME_BASE
+    int64_t pts;    ///< Same as packet pts, in the internal time base
 } AVSubtitle;
 
 /**
diff --git a/libavcodec/libzvbi-teletextdec.c b/libavcodec/libzvbi-teletextdec.c
index abc24c3..8ab5525 100644
--- a/libavcodec/libzvbi-teletextdec.c
+++ b/libavcodec/libzvbi-teletextdec.c
@@ -94,7 +94,7 @@ static int create_ass_text(TeletextContext *ctx, const char *text, char **ass)
 {
     int ret;
     AVBPrint buf, buf2;
-    const int ts_start    = av_rescale_q(ctx->pts,          AV_TIME_BASE_Q,        (AVRational){1, 100});
+    const int ts_start    = av_rescale_q(ctx->pts,          av_get_time_base_q(),        (AVRational){1, 100});
     const int ts_duration = av_rescale_q(ctx->sub_duration, (AVRational){1, 1000}, (AVRational){1, 100});
 
     /* First we escape the plain text into buf. */
@@ -384,7 +384,7 @@ static int teletext_decode_frame(AVCodecContext *avctx, void *data, int *data_si
         return AVERROR(ENOMEM);
 
     if (avctx->pkt_timebase.den && pkt->pts != AV_NOPTS_VALUE)
-        ctx->pts = av_rescale_q(pkt->pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
+        ctx->pts = av_rescale_q(pkt->pts, avctx->pkt_timebase, av_get_time_base_q());
 
     if (left) {
         // We allow unreasonably big packets, even if the standard only allows a max size of 1472
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index c000d27..179a7af 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -2459,7 +2459,7 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
 
             if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE)
                 sub->pts = av_rescale_q(avpkt->pts,
-                                        avctx->pkt_timebase, AV_TIME_BASE_Q);
+                                        avctx->pkt_timebase, av_get_time_base_q());
             ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
             av_assert1((ret >= 0) >= !!*got_sub_ptr &&
                        !!*got_sub_ptr >= !!sub->num_rects);
diff --git a/libavcodec/xsubdec.c b/libavcodec/xsubdec.c
index 174d74e..dd863df 100644
--- a/libavcodec/xsubdec.c
+++ b/libavcodec/xsubdec.c
@@ -70,7 +70,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         return -1;
     }
     if (avpkt->pts != AV_NOPTS_VALUE)
-        packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, (AVRational){1, 1000});
+        packet_time = av_rescale_q(avpkt->pts, av_get_time_base_q(), (AVRational){1, 1000});
     sub->start_display_time = parse_timecode(buf +  1, packet_time);
     sub->end_display_time   = parse_timecode(buf + 14, packet_time);
     buf += 27;
diff --git a/libavdevice/caca.c b/libavdevice/caca.c
index 0a74701..373cdf8 100644
--- a/libavdevice/caca.c
+++ b/libavdevice/caca.c
@@ -178,7 +178,7 @@ static int caca_write_header(AVFormatContext *s)
     if (!c->window_title)
         c->window_title = av_strdup(s->filename);
     caca_set_display_title(c->display, c->window_title);
-    caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));
+    caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, av_get_time_base_q()));
 
     return 0;
 
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
index a177ad0..3109538 100644
--- a/libavdevice/lavfi.c
+++ b/libavdevice/lavfi.c
@@ -343,7 +343,7 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
             continue;
         } else if (ret < 0)
             return ret;
-        d = av_rescale_q(frame->pts, tb, AV_TIME_BASE_Q);
+        d = av_rescale_q(frame->pts, tb, av_get_time_base_q());
         av_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
         av_frame_unref(frame);
 
diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index cb962b7..0cda3d9 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -441,7 +441,7 @@ static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
 
     now = av_gettime();
     if (s->ts_mode == V4L_TS_ABS &&
-        ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE) {
+        ts <= now + 1 * av_get_time_base() && ts >= now - 10 * av_get_time_base()) {
         av_log(ctx, AV_LOG_INFO, "Detected absolute timestamps\n");
         s->ts_mode = V4L_TS_CONVERT_READY;
         return 0;
@@ -449,8 +449,8 @@ static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
 #if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
     now = av_gettime_monotonic();
     if (s->ts_mode == V4L_TS_MONO2ABS ||
-        (ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE)) {
-        AVRational tb = {AV_TIME_BASE, 1};
+        (ts <= now + 1 * av_get_time_base() && ts >= now - 10 * av_get_time_base())) {
+        AVRational tb = {av_get_time_base(), 1};
         int64_t period = av_rescale_q(1, tb, ctx->streams[0]->avg_frame_rate);
         av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
         /* microseconds instead of seconds, MHz instead of Hz */
diff --git a/libavfilter/aeval.c b/libavfilter/aeval.c
index 2790cee..43d3491 100644
--- a/libavfilter/aeval.c
+++ b/libavfilter/aeval.c
@@ -259,7 +259,7 @@ static int request_frame(AVFilterLink *outlink)
     EvalContext *eval = outlink->src->priv;
     AVFrame *samplesref;
     int i, j;
-    int64_t t = av_rescale(eval->n, AV_TIME_BASE, eval->sample_rate);
+    int64_t t = av_rescale(eval->n, av_get_time_base(), eval->sample_rate);
 
     if (eval->duration >= 0 && t >= eval->duration)
         return AVERROR_EOF;
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index fbf9802..d3df29b 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -217,9 +217,9 @@ static int config_input(AVFilterLink *inlink)
     }
 
     if (s->duration)
-        s->nb_samples = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
+        s->nb_samples = av_rescale(s->duration, inlink->sample_rate, av_get_time_base());
     if (s->start_time)
-        s->start_sample = av_rescale(s->start_time, inlink->sample_rate, AV_TIME_BASE);
+        s->start_sample = av_rescale(s->start_time, inlink->sample_rate, av_get_time_base());
 
     return 0;
 }
diff --git a/libavfilter/af_ladspa.c b/libavfilter/af_ladspa.c
index 2057e6d..0e6d5cd 100644
--- a/libavfilter/af_ladspa.c
+++ b/libavfilter/af_ladspa.c
@@ -191,7 +191,7 @@ static int request_frame(AVFilterLink *outlink)
     if (ctx->nb_inputs)
         return ff_request_frame(ctx->inputs[0]);
 
-    t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
+    t = av_rescale(s->pts, av_get_time_base(), s->sample_rate);
     if (s->duration >= 0 && t >= s->duration)
         return AVERROR_EOF;
 
diff --git a/libavfilter/asrc_sine.c b/libavfilter/asrc_sine.c
index 68e1398..e0410ef 100644
--- a/libavfilter/asrc_sine.c
+++ b/libavfilter/asrc_sine.c
@@ -163,7 +163,7 @@ static av_cold int query_formats(AVFilterContext *ctx)
 static av_cold int config_props(AVFilterLink *outlink)
 {
     SineContext *sine = outlink->src->priv;
-    sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
+    sine->duration = av_rescale(sine->duration, sine->sample_rate, av_get_time_base());
     return 0;
 }
 
diff --git a/libavfilter/avf_concat.c b/libavfilter/avf_concat.c
index c211dc4..f1c0d33 100644
--- a/libavfilter/avf_concat.c
+++ b/libavfilter/avf_concat.c
@@ -127,7 +127,7 @@ static int config_output(AVFilterLink *outlink)
     AVFilterLink *inlink = ctx->inputs[in_no];
 
     /* enhancement: find a common one */
-    outlink->time_base           = AV_TIME_BASE_Q;
+    outlink->time_base           = av_get_time_base_q();
     outlink->w                   = inlink->w;
     outlink->h                   = inlink->h;
     outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 2567ce9..a916fb1 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -261,7 +261,7 @@ int avfilter_config_links(AVFilterContext *filter)
             switch (link->type) {
             case AVMEDIA_TYPE_VIDEO:
                 if (!link->time_base.num && !link->time_base.den)
-                    link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
+                    link->time_base = inlink ? inlink->time_base : av_get_time_base_q();
 
                 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
                     link->sample_aspect_ratio = inlink ?
@@ -428,7 +428,7 @@ void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
 {
     if (pts == AV_NOPTS_VALUE)
         return;
-    link->current_pts = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
+    link->current_pts = av_rescale_q(pts, link->time_base, av_get_time_base_q());
     /* TODO use duration */
     if (link->graph && link->age_index >= 0)
         ff_avfilter_graph_update_heap(link->graph, link);
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 3518ad8..053954c 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -766,7 +766,7 @@ struct AVFilterLink {
 
     /**
      * Current timestamp of the link, as defined by the most recent
-     * frame(s), in AV_TIME_BASE units.
+     * frame(s), in av_get_time_base() units.
      */
     int64_t current_pts;
 
diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c
index 95401cf..b65740c 100644
--- a/libavfilter/f_interleave.c
+++ b/libavfilter/f_interleave.c
@@ -76,7 +76,7 @@ inline static int push_frame(AVFilterContext *ctx)
 
     frame = ff_bufqueue_get(&s->queues[queue_idx]);
     av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
-           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
+           queue_idx, frame->pts * av_q2d(av_get_time_base_q()));
     return ff_filter_frame(ctx->outputs[0], frame);
 }
 
@@ -94,9 +94,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     }
 
     /* queue frame */
-    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
+    frame->pts = av_rescale_q(frame->pts, inlink->time_base, av_get_time_base_q());
     av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
-           frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
+           frame->pts * av_q2d(av_get_time_base_q()), in_no, s->queues[in_no].available);
     ff_bufqueue_add(ctx, &s->queues[in_no], frame);
 
     return push_frame(ctx);
@@ -154,7 +154,7 @@ static int config_output(AVFilterLink *outlink)
     int i;
 
     if (outlink->type == AVMEDIA_TYPE_VIDEO) {
-        outlink->time_base           = AV_TIME_BASE_Q;
+        outlink->time_base           = av_get_time_base_q();
         outlink->w                   = inlink0->w;
         outlink->h                   = inlink0->h;
         outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
diff --git a/libavfilter/f_sendcmd.c b/libavfilter/f_sendcmd.c
index c30f49f..a3f7573 100644
--- a/libavfilter/f_sendcmd.c
+++ b/libavfilter/f_sendcmd.c
@@ -450,7 +450,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
     if (ref->pts == AV_NOPTS_VALUE)
         goto end;
 
-    ts = av_rescale_q(ref->pts, inlink->time_base, AV_TIME_BASE_Q);
+    ts = av_rescale_q(ref->pts, inlink->time_base, av_get_time_base_q());
 
 #define WITHIN_INTERVAL(ts, start_ts, end_ts) ((ts) >= (start_ts) && (ts) < (end_ts))
 
diff --git a/libavfilter/f_settb.c b/libavfilter/f_settb.c
index d511c14..5e6f6ae 100644
--- a/libavfilter/f_settb.c
+++ b/libavfilter/f_settb.c
@@ -38,7 +38,7 @@
 #include "video.h"
 
 static const char *const var_names[] = {
-    "AVTB",   /* default timebase 1/AV_TIME_BASE */
+    "AVTB",   /* default timebase 1/av_get_time_base() */
     "intb",   /* input timebase */
     "sr",     /* sample rate */
     NULL
@@ -76,7 +76,7 @@ static int config_output_props(AVFilterLink *outlink)
     int ret;
     double res;
 
-    settb->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
+    settb->var_values[VAR_AVTB] = av_q2d(av_get_time_base_q());
     settb->var_values[VAR_INTB] = av_q2d(inlink->time_base);
     settb->var_values[VAR_SR]   = inlink->sample_rate;
 
diff --git a/libavfilter/framesync.c b/libavfilter/framesync.c
index 12db50c..24b3498 100644
--- a/libavfilter/framesync.c
+++ b/libavfilter/framesync.c
@@ -80,13 +80,13 @@ int ff_framesync_configure(FFFrameSync *fs)
                 if (fs->time_base.num) {
                     gcd = av_gcd(fs->time_base.den, fs->in[i].time_base.den);
                     lcm = (fs->time_base.den / gcd) * fs->in[i].time_base.den;
-                    if (lcm < AV_TIME_BASE / 2) {
+                    if (lcm < av_get_time_base() / 2) {
                         fs->time_base.den = lcm;
                         fs->time_base.num = av_gcd(fs->time_base.num,
                                                    fs->in[i].time_base.num);
                     } else {
                         fs->time_base.num = 1;
-                        fs->time_base.den = AV_TIME_BASE;
+                        fs->time_base.den = av_get_time_base();
                         break;
                     }
                 } else {
diff --git a/libavfilter/trim.c b/libavfilter/trim.c
index 04e82da..a5c6f3c 100644
--- a/libavfilter/trim.c
+++ b/libavfilter/trim.c
@@ -99,17 +99,17 @@ static int config_input(AVFilterLink *inlink)
         s->duration = s->duration_dbl * 1e6;
 
     if (s->start_time != INT64_MAX) {
-        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
+        int64_t start_pts = av_rescale_q(s->start_time, av_get_time_base_q(), tb);
         if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
             s->start_pts = start_pts;
     }
     if (s->end_time != INT64_MAX) {
-        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
+        int64_t end_pts = av_rescale_q(s->end_time, av_get_time_base_q(), tb);
         if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
             s->end_pts = end_pts;
     }
     if (s->duration)
-        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
+        s->duration_tb = av_rescale_q(s->duration, av_get_time_base_q(), tb);
 
     return 0;
 }
diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c
index cc10b12..6de350a 100644
--- a/libavfilter/vf_fade.c
+++ b/libavfilter/vf_fade.c
@@ -87,8 +87,8 @@ static av_cold int init(AVFilterContext *ctx)
     if (s->start_time || s->duration) {
         av_log(ctx, AV_LOG_VERBOSE,
                "type:%s start_time:%f duration:%f alpha:%d\n",
-               s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
-               (s->duration / (double)AV_TIME_BASE),s->alpha);
+               s->type == FADE_IN ? "in" : "out", (s->start_time / (double)av_get_time_base()),
+               (s->duration / (double)av_get_time_base()),s->alpha);
     }
 
     s->black_fade = !memcmp(s->color_rgba, "\x00\x00\x00\xff", 4);
@@ -276,14 +276,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     // Calculate Fade assuming this is a Fade In
     if (s->fade_state == VF_FADE_WAITING) {
         s->factor=0;
-        if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
+        if (frame_timestamp >= s->start_time/(double)av_get_time_base()
             && inlink->frame_count >= s->start_frame) {
             // Time to start fading
             s->fade_state = VF_FADE_FADING;
 
             // Save start time in case we are starting based on frames and fading based on time
             if (s->start_time == 0 && s->start_frame != 0) {
-                s->start_time = frame_timestamp*(double)AV_TIME_BASE;
+                s->start_time = frame_timestamp*(double)av_get_time_base();
             }
 
             // Save start frame in case we are starting based on time and fading based on frames
@@ -302,10 +302,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 
         } else {
             // Fading based on duration
-            s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
-                            * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
-            if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
-                                  + s->duration/(double)AV_TIME_BASE) {
+            s->factor = (frame_timestamp - s->start_time/(double)av_get_time_base())
+                            * (float) UINT16_MAX / (s->duration/(double)av_get_time_base());
+            if (frame_timestamp > s->start_time/(double)av_get_time_base()
+                                  + s->duration/(double)av_get_time_base()) {
                 s->fade_state = VF_FADE_DONE;
             }
         }
diff --git a/libavfilter/vf_fps.c b/libavfilter/vf_fps.c
index e6266cc..4bf80c7 100644
--- a/libavfilter/vf_fps.c
+++ b/libavfilter/vf_fps.c
@@ -186,12 +186,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
                 return ret;
 
             if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
-                double first_pts = s->start_time * AV_TIME_BASE;
+                double first_pts = s->start_time * av_get_time_base();
                 first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
-                s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
+                s->first_pts = s->pts = av_rescale_q(first_pts, av_get_time_base_q(),
                                                      inlink->time_base);
                 av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
-                       s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
+                       s->first_pts, av_rescale_q(first_pts, av_get_time_base_q(),
                                                   outlink->time_base));
             } else {
                 s->first_pts = s->pts = buf->pts;
diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
index 7843673..f6cf0e6 100644
--- a/libavfilter/vf_hue.c
+++ b/libavfilter/vf_hue.c
@@ -41,7 +41,7 @@
 
 static const char *const var_names[] = {
     "n",   // frame count
-    "pts", // presentation timestamp expressed in AV_TIME_BASE units
+    "pts", // presentation timestamp expressed in units of the internal time base (see av_get_time_base())
     "r",   // frame rate
     "t",   // timestamp expressed in seconds
     "tb",  // timebase
diff --git a/libavfilter/vf_vignette.c b/libavfilter/vf_vignette.c
index 8ce7b7e..8ef0e89 100644
--- a/libavfilter/vf_vignette.c
+++ b/libavfilter/vf_vignette.c
@@ -33,7 +33,7 @@ static const char *const var_names[] = {
     "w",    // stream width
     "h",    // stream height
     "n",    // frame count
-    "pts",  // presentation timestamp expressed in AV_TIME_BASE units
+    "pts",  // presentation timestamp expressed in units of the internal time base (see av_get_time_base())
     "r",    // frame rate
     "t",    // timestamp expressed in seconds
     "tb",   // timebase
diff --git a/libavfilter/vsrc_mptestsrc.c b/libavfilter/vsrc_mptestsrc.c
index d045704..abbb4ec 100644
--- a/libavfilter/vsrc_mptestsrc.c
+++ b/libavfilter/vsrc_mptestsrc.c
@@ -258,7 +258,7 @@ static av_cold int init(AVFilterContext *ctx)
     MPTestContext *test = ctx->priv;
 
     test->max_pts = test->duration >= 0 ?
-        av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
+        av_rescale_q(test->duration, av_get_time_base_q(), av_inv_q(test->frame_rate)) : -1;
     test->pts = 0;
 
     av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c
index 0ad1474..5073784 100644
--- a/libavfilter/vsrc_testsrc.c
+++ b/libavfilter/vsrc_testsrc.c
@@ -138,7 +138,7 @@ static int request_frame(AVFilterLink *outlink)
     AVFrame *frame;
 
     if (test->duration >= 0 &&
-        av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration)
+        av_rescale_q(test->pts, test->time_base, av_get_time_base_q()) >= test->duration)
         return AVERROR_EOF;
 
     if (test->draw_once) {
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 52eef0d..2506793 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -1015,13 +1015,13 @@ typedef struct AVFormatContext {
 
     /**
      * Decoding: position of the first frame of the component, in
-     * AV_TIME_BASE fractional seconds. NEVER set this value directly:
+     * fractional seconds of the internal time base. NEVER set this value directly:
      * It is deduced from the AVStream values.
      */
     int64_t start_time;
 
     /**
-     * Decoding: duration of the stream, in AV_TIME_BASE fractional
+     * Decoding: duration of the stream, in internal time base fractional
      * seconds. Only set this value if you know none of the individual stream
      * durations and also do not set any of them. This is deduced from the
      * AVStream values if not set.
@@ -1060,7 +1060,7 @@ typedef struct AVFormatContext {
     unsigned int probesize;
 
     /**
-     * decoding: maximum time (in AV_TIME_BASE units) during which the input should
+     * decoding: maximum time (in internal time base units) during which the input should
      * be analyzed in avformat_find_stream_info().
      */
     int max_analyze_duration;
@@ -1736,9 +1736,9 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt);
  * 'timestamp' in 'stream_index'.
  * @param stream_index If stream_index is (-1), a default
  * stream is selected, and timestamp is automatically converted
- * from AV_TIME_BASE units to the stream specific time_base.
+ * from internal time base units to the stream specific time_base.
  * @param timestamp Timestamp in AVStream.time_base units
- *        or, if no stream is specified, in AV_TIME_BASE units.
+ *        or, if no stream is specified, in internal time base units.
  * @param flags flags which select direction and seeking mode
  * @return >= 0 on success
  */
@@ -1756,7 +1756,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,
  * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames
  * in the stream with stream_index (this may not be supported by all demuxers).
  * Otherwise all timestamps are in units of the stream selected by stream_index
- * or if stream_index is -1, in AV_TIME_BASE units.
+ * or if stream_index is -1, in internal time base units.
  * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as
  * keyframes (this may not be supported by all demuxers).
  * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored.
diff --git a/libavformat/avidec.c b/libavformat/avidec.c
index 17eb245..b8dca1f 100644
--- a/libavformat/avidec.c
+++ b/libavformat/avidec.c
@@ -534,7 +534,7 @@ static int avi_read_header(AVFormatContext *s)
 
                 dv_dur = avio_rl32(pb);
                 if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
-                    dv_dur     *= AV_TIME_BASE;
+                    dv_dur     *= av_get_time_base();
                     s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
                 }
                 /* else, leave duration alone; timing estimation in utils.c
@@ -959,13 +959,13 @@ static AVStream *get_subtitle_pkt(AVFormatContext *s, AVStream *next_st,
     int i;
 
     next_ts = av_rescale_q(next_ast->frame_offset, next_st->time_base,
-                           AV_TIME_BASE_Q);
+                           av_get_time_base_q());
 
     for (i = 0; i < s->nb_streams; i++) {
         st  = s->streams[i];
         ast = st->priv_data;
         if (st->discard < AVDISCARD_ALL && ast && ast->sub_pkt.data) {
-            ts = av_rescale_q(ast->sub_pkt.dts, st->time_base, AV_TIME_BASE_Q);
+            ts = av_rescale_q(ast->sub_pkt.dts, st->time_base, av_get_time_base_q());
             if (ts <= next_ts && ts < ts_min) {
                 ts_min = ts;
                 sub_st = st;
@@ -1182,7 +1182,7 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
 
             ts = av_rescale_q(ts, st->time_base,
                               (AVRational) { FFMAX(1, ast->sample_size),
-                                             AV_TIME_BASE });
+                                             av_get_time_base() });
 
             av_dlog(s, "%"PRId64" %d/%d %"PRId64"\n", ts,
                     st->time_base.num, st->time_base.den, ast->frame_offset);
@@ -1303,7 +1303,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
                     ast->scale,
                     ast->rate,
                     ast->sample_size,
-                    AV_TIME_BASE,
+                    av_get_time_base(),
                     avi->stream_index,
                     size);
             pkt->stream_index = avi->stream_index;
@@ -1354,9 +1354,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
         ast->seek_pos= 0;
 
         if (!avi->non_interleaved && st->nb_index_entries>1 && avi->index_loaded>1) {
-            int64_t dts= av_rescale_q(pkt->dts, st->time_base, AV_TIME_BASE_Q);
+            int64_t dts= av_rescale_q(pkt->dts, st->time_base, av_get_time_base_q());
 
-            if (avi->dts_max - dts > 2*AV_TIME_BASE) {
+            if (avi->dts_max - dts > 2*av_get_time_base()) {
                 avi->non_interleaved= 1;
                 av_log(s, AV_LOG_INFO, "Switching to NI mode, due to poor interleaving\n");
             }else if (avi->dts_max < dts)
@@ -1499,13 +1499,13 @@ static int guess_ni_flag(AVFormatContext *s)
             while (idx[i]<n && st->index_entries[idx[i]].pos < pos)
                 idx[i]++;
             if (idx[i] < n) {
-                min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
+                min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp/FFMAX(ast->sample_size, 1), st->time_base, av_get_time_base_q()));
                 min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
             }
             if (idx[i])
-                max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i]-1].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
+                max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i]-1].timestamp/FFMAX(ast->sample_size, 1), st->time_base, av_get_time_base_q()));
         }
-        if (max_dts - min_dts > 2*AV_TIME_BASE) {
+        if (max_dts - min_dts > 2*av_get_time_base()) {
             av_free(idx);
             return 1;
         }
diff --git a/libavformat/avio.h b/libavformat/avio.h
index 4f4ac3c..85c8d93 100644
--- a/libavformat/avio.h
+++ b/libavformat/avio.h
@@ -462,12 +462,12 @@ int     avio_pause(AVIOContext *h, int pause);
  * Seek to a given timestamp relative to some component stream.
  * Only meaningful if using a network streaming protocol (e.g. MMS.).
  * @param stream_index The stream index that the timestamp is relative to.
- *        If stream_index is (-1) the timestamp should be in AV_TIME_BASE
+ *        If stream_index is (-1) the timestamp should be in internal time base
  *        units from the beginning of the presentation.
  *        If a stream_index >= 0 is used and the protocol does not support
  *        seeking based on component streams, the call will fail.
  * @param timestamp timestamp in AVStream.time_base units
- *        or if there is no stream specified then in AV_TIME_BASE units.
+ *        or if there is no stream specified then in internal time base units.
  * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
  *        and AVSEEK_FLAG_ANY. The protocol may silently ignore
  *        AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
diff --git a/libavformat/concatdec.c b/libavformat/concatdec.c
index 86f82e0..38cef8d 100644
--- a/libavformat/concatdec.c
+++ b/libavformat/concatdec.c
@@ -283,7 +283,7 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
         return ret;
 
     delta = av_rescale_q(cat->cur_file->start_time - cat->avf->start_time,
-                         AV_TIME_BASE_Q,
+                         av_get_time_base_q(),
                          cat->avf->streams[pkt->stream_index]->time_base);
     if (pkt->pts != AV_NOPTS_VALUE)
         pkt->pts += delta;
@@ -314,7 +314,7 @@ static int try_seek(AVFormatContext *avf, int stream,
     if (stream >= 0) {
         if (stream >= cat->avf->nb_streams)
             return AVERROR(EIO);
-        rescale_interval(AV_TIME_BASE_Q, cat->avf->streams[stream]->time_base,
+        rescale_interval(av_get_time_base_q(), cat->avf->streams[stream]->time_base,
                          &min_ts, &ts, &max_ts);
     }
     return avformat_seek_file(cat->avf, stream, min_ts, ts, max_ts, flags);
@@ -329,7 +329,7 @@ static int real_seek(AVFormatContext *avf, int stream,
     if (stream >= 0) {
         if (stream >= avf->nb_streams)
             return AVERROR(EINVAL);
-        rescale_interval(avf->streams[stream]->time_base, AV_TIME_BASE_Q,
+        rescale_interval(avf->streams[stream]->time_base, av_get_time_base_q(),
                          &min_ts, &ts, &max_ts);
     }
 
diff --git a/libavformat/dxa.c b/libavformat/dxa.c
index 5b2d7c0..21583b2 100644
--- a/libavformat/dxa.c
+++ b/libavformat/dxa.c
@@ -141,7 +141,7 @@ static int dxa_read_header(AVFormatContext *s)
     c->readvid = !c->has_sound;
     c->vidpos  = avio_tell(pb);
     s->start_time = 0;
-    s->duration = (int64_t)c->frames * AV_TIME_BASE * num / den;
+    s->duration = (int64_t)c->frames * av_get_time_base() * num / den;
     av_log(s, AV_LOG_DEBUG, "%d frame(s)\n",c->frames);
 
     return 0;
diff --git a/libavformat/ffmetadec.c b/libavformat/ffmetadec.c
index 19c14e4..733b910 100644
--- a/libavformat/ffmetadec.c
+++ b/libavformat/ffmetadec.c
@@ -156,7 +156,7 @@ static int read_header(AVFormatContext *s)
     if (s->nb_chapters)
         s->duration = av_rescale_q(s->chapters[s->nb_chapters - 1]->end,
                                    s->chapters[s->nb_chapters - 1]->time_base,
-                                   AV_TIME_BASE_Q);
+                                   av_get_time_base_q());
 
     return 0;
 }
diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c
index 5683ed5..e99af57 100644
--- a/libavformat/flvdec.c
+++ b/libavformat/flvdec.c
@@ -452,7 +452,7 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
         if (amf_type == AMF_DATA_TYPE_NUMBER ||
             amf_type == AMF_DATA_TYPE_BOOL) {
             if (!strcmp(key, "duration"))
-                s->duration = num_val * AV_TIME_BASE;
+                s->duration = num_val * av_get_time_base();
             else if (!strcmp(key, "videodatarate") && vcodec &&
                      0 <= (int)(num_val * 1024.0))
                 vcodec->bit_rate = num_val * 1024.0;
@@ -882,7 +882,7 @@ retry_duration:
             uint32_t ts = avio_rb24(s->pb);
             ts         |= avio_r8(s->pb) << 24;
             if (ts)
-                s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
+                s->duration = ts * (int64_t)av_get_time_base() / 1000;
             else if (fsize >= 8 && fsize - 8 >= size) {
                 fsize -= size+4;
                 goto retry_duration;
diff --git a/libavformat/flvenc.c b/libavformat/flvenc.c
index fb36f66..0adc614 100644
--- a/libavformat/flvenc.c
+++ b/libavformat/flvenc.c
@@ -302,7 +302,7 @@ static int flv_write_header(AVFormatContext *s)
     flv->duration_offset= avio_tell(pb);
 
     // fill in the guessed duration, it'll be corrected later if incorrect
-    put_amf_double(pb, s->duration / AV_TIME_BASE);
+    put_amf_double(pb, s->duration / av_get_time_base());
 
     if (video_enc) {
         put_amf_string(pb, "width");
diff --git a/libavformat/hdsenc.c b/libavformat/hdsenc.c
index f22875d..f75881e 100644
--- a/libavformat/hdsenc.c
+++ b/libavformat/hdsenc.c
@@ -528,7 +528,7 @@ static int hds_write_packet(AVFormatContext *s, AVPacket *pkt)
 
     if ((!os->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
         av_compare_ts(pkt->dts - st->first_dts, st->time_base,
-                      end_dts, AV_TIME_BASE_Q) >= 0 &&
+                      end_dts, av_get_time_base_q()) >= 0 &&
         pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {
 
         if ((ret = hds_flush(s, os, 0, pkt->dts)) < 0)
diff --git a/libavformat/hls.c b/libavformat/hls.c
index 471a62d..11d8a41 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -278,7 +278,7 @@ static int parse_playlist(HLSContext *c, const char *url,
                     goto fail;
                 }
             }
-            var->target_duration = atoi(ptr) * AV_TIME_BASE;
+            var->target_duration = atoi(ptr) * av_get_time_base();
         } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
             if (!var) {
                 var = new_variant(c, 0, url, NULL);
@@ -293,7 +293,7 @@ static int parse_playlist(HLSContext *c, const char *url,
                 var->finished = 1;
         } else if (av_strstart(line, "#EXTINF:", &ptr)) {
             is_segment = 1;
-            duration   = atof(ptr) * AV_TIME_BASE;
+            duration   = atof(ptr) * av_get_time_base();
         } else if (av_strstart(line, "#", NULL)) {
             continue;
         } else if (line[0]) {
@@ -697,7 +697,7 @@ start:
                         var->pkt.dts       != AV_NOPTS_VALUE)
                         c->first_timestamp = av_rescale_q(var->pkt.dts,
                             var->ctx->streams[var->pkt.stream_index]->time_base,
-                            AV_TIME_BASE_Q);
+                            av_get_time_base_q());
                 }
 
                 if (c->seek_timestamp == AV_NOPTS_VALUE)
@@ -709,7 +709,7 @@ start:
                 }
 
                 st = var->ctx->streams[var->pkt.stream_index];
-                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
+                ts_diff = av_rescale_rnd(var->pkt.dts, av_get_time_base(),
                                          st->time_base.den, AV_ROUND_DOWN) -
                           c->seek_timestamp;
                 if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
@@ -782,13 +782,13 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
 
     c->seek_flags     = flags;
     c->seek_timestamp = stream_index < 0 ? timestamp :
-                        av_rescale_rnd(timestamp, AV_TIME_BASE,
+                        av_rescale_rnd(timestamp, av_get_time_base(),
                                        s->streams[stream_index]->time_base.den,
                                        flags & AVSEEK_FLAG_BACKWARD ?
                                        AV_ROUND_DOWN : AV_ROUND_UP);
-    timestamp = av_rescale_rnd(timestamp, AV_TIME_BASE, stream_index >= 0 ?
+    timestamp = av_rescale_rnd(timestamp, av_get_time_base(), stream_index >= 0 ?
                                s->streams[stream_index]->time_base.den :
-                               AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ?
+                               av_get_time_base(), flags & AVSEEK_FLAG_BACKWARD ?
                                AV_ROUND_DOWN : AV_ROUND_UP);
     if (s->duration < c->seek_timestamp) {
         c->seek_timestamp = AV_NOPTS_VALUE;
diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c
index 3b50397..c0d7030 100644
--- a/libavformat/hlsenc.c
+++ b/libavformat/hlsenc.c
@@ -193,7 +193,7 @@ static int hls_write_header(AVFormatContext *s)
 
     hls->number      = 0;
 
-    hls->recording_time = hls->time * AV_TIME_BASE;
+    hls->recording_time = hls->time * av_get_time_base();
     hls->start_pts      = AV_NOPTS_VALUE;
 
     for (i = 0; i < s->nb_streams; i++)
@@ -274,7 +274,7 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt)
                                    st->time_base.num, st->time_base.den);
 
     if (can_split && av_compare_ts(pkt->pts - hls->start_pts, st->time_base,
-                                   end_pts, AV_TIME_BASE_Q) >= 0) {
+                                   end_pts, av_get_time_base_q()) >= 0) {
         ret = append_entry(hls, hls->duration);
         if (ret)
             return ret;
diff --git a/libavformat/hlsproto.c b/libavformat/hlsproto.c
index f6fcbe5..1f4d07b 100644
--- a/libavformat/hlsproto.c
+++ b/libavformat/hlsproto.c
@@ -135,14 +135,14 @@ static int parse_playlist(URLContext *h, const char *url)
                                &info);
             bandwidth = atoi(info.bandwidth);
         } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
-            s->target_duration = atoi(ptr) * AV_TIME_BASE;
+            s->target_duration = atoi(ptr) * av_get_time_base();
         } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
             s->start_seq_no = atoi(ptr);
         } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
             s->finished = 1;
         } else if (av_strstart(line, "#EXTINF:", &ptr)) {
             is_segment = 1;
-            duration = atof(ptr) * AV_TIME_BASE;
+            duration = atof(ptr) * av_get_time_base();
         } else if (av_strstart(line, "#", NULL)) {
             continue;
         } else if (line[0]) {
diff --git a/libavformat/librtmp.c b/libavformat/librtmp.c
index 5b4c39d..eb2e629 100644
--- a/libavformat/librtmp.c
+++ b/libavformat/librtmp.c
@@ -174,7 +174,7 @@ static int64_t rtmp_read_seek(URLContext *s, int stream_index,
 
     /* seeks are in milliseconds */
     if (stream_index < 0)
-        timestamp = av_rescale_rnd(timestamp, 1000, AV_TIME_BASE,
+        timestamp = av_rescale_rnd(timestamp, 1000, av_get_time_base(),
             flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP);
 
     if (!RTMP_SendSeek(r, timestamp))
diff --git a/libavformat/matroskadec.c b/libavformat/matroskadec.c
index e994786..89fd7c1 100644
--- a/libavformat/matroskadec.c
+++ b/libavformat/matroskadec.c
@@ -1585,7 +1585,7 @@ static int matroska_read_header(AVFormatContext *s)
         matroska->time_scale = 1000000;
     if (matroska->duration)
         matroska->ctx->duration = matroska->duration * matroska->time_scale
-                                  * 1000 / AV_TIME_BASE;
+                                  * 1000 / av_get_time_base();
     av_dict_set(&s->metadata, "title", matroska->title, 0);
 
     if (matroska->date_utc.size == 8)
@@ -1782,7 +1782,7 @@ static int matroska_read_header(AVFormatContext *s)
             if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX)
                 return AVERROR_INVALIDDATA;
             avio_wl32(&b, track->audio.out_samplerate);
-            avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale), track->audio.out_samplerate, AV_TIME_BASE * 1000));
+            avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale), track->audio.out_samplerate, av_get_time_base() * 1000));
         } else if (codec_id == AV_CODEC_ID_RV10 || codec_id == AV_CODEC_ID_RV20 ||
                    codec_id == AV_CODEC_ID_RV30 || codec_id == AV_CODEC_ID_RV40) {
             extradata_offset = 26;
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 8dc6062..a35a0b2 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -891,7 +891,7 @@ static int mov_read_mvhd(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     // set the AVCodecContext duration because the duration of individual tracks
     // may be inaccurate
     if (c->time_scale > 0 && !c->trex_data)
-        c->fc->duration = av_rescale(c->duration, AV_TIME_BASE, c->time_scale);
+        c->fc->duration = av_rescale(c->duration, av_get_time_base(), c->time_scale);
     avio_rb32(pb); /* preferred scale */
 
     avio_rb16(pb); /* preferred volume */
@@ -3432,13 +3432,13 @@ static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
         MOVStreamContext *msc = avst->priv_data;
         if (msc->pb && msc->current_sample < avst->nb_index_entries) {
             AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample];
-            int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
+            int64_t dts = av_rescale(current_sample->timestamp, av_get_time_base(), msc->time_scale);
             av_dlog(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
             if (!sample || (!s->pb->seekable && current_sample->pos < sample->pos) ||
                 (s->pb->seekable &&
                  ((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb &&
-                 ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
-                  (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) {
+                 ((FFABS(best_dts - dts) <= av_get_time_base() && current_sample->pos < sample->pos) ||
+                  (FFABS(best_dts - dts) > av_get_time_base() && dts < best_dts)))))) {
                 sample = current_sample;
                 best_dts = dts;
                 *st = avst;
diff --git a/libavformat/movenc.c b/libavformat/movenc.c
index a886ab0..7142e9e 100644
--- a/libavformat/movenc.c
+++ b/libavformat/movenc.c
@@ -3411,7 +3411,7 @@ static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt)
         if (trk->entry && pkt->stream_index < s->nb_streams)
             frag_duration = av_rescale_q(pkt->dts - trk->cluster[0].dts,
                                          s->streams[pkt->stream_index]->time_base,
-                                         AV_TIME_BASE_Q);
+                                         av_get_time_base_q());
         if ((mov->max_fragment_duration &&
              frag_duration >= mov->max_fragment_duration) ||
              (mov->max_fragment_size && mov->mdat_size + size >= mov->max_fragment_size) ||
diff --git a/libavformat/mpeg.c b/libavformat/mpeg.c
index 1777283..c124301 100644
--- a/libavformat/mpeg.c
+++ b/libavformat/mpeg.c
@@ -908,12 +908,12 @@ static int vobsub_read_seek(AVFormatContext *s, int stream_index,
     if (stream_index == -1 && s->nb_streams != 1) {
         int i, ret = 0;
         AVRational time_base = s->streams[0]->time_base;
-        ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
+        ts = av_rescale_q(ts, av_get_time_base_q(), time_base);
         min_ts = av_rescale_rnd(min_ts, time_base.den,
-                                time_base.num * (int64_t)AV_TIME_BASE,
+                                time_base.num * (int64_t)av_get_time_base(),
                                 AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
         max_ts = av_rescale_rnd(max_ts, time_base.den,
-                                time_base.num * (int64_t)AV_TIME_BASE,
+                                time_base.num * (int64_t)av_get_time_base(),
                                 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
         for (i = 0; i < s->nb_streams; i++) {
             int r = ff_subtitles_queue_seek(&vobsub->q[i], s, stream_index,
diff --git a/libavformat/mpegenc.c b/libavformat/mpegenc.c
index ccf3ec2..39d59da 100644
--- a/libavformat/mpegenc.c
+++ b/libavformat/mpegenc.c
@@ -322,7 +322,7 @@ static av_cold int mpeg_mux_init(AVFormatContext *ctx)
     } else
         s->packet_size = 2048;
     if (ctx->max_delay < 0) /* Not set by the caller */
-        ctx->max_delay = 0.7*AV_TIME_BASE;
+        ctx->max_delay = 0.7*av_get_time_base();
 
     s->vcd_padding_bytes_written = 0;
     s->vcd_padding_bitrate=0;
@@ -948,7 +948,7 @@ static int output_packet(AVFormatContext *ctx, int flush){
     int ignore_constraints=0;
     int64_t scr= s->last_scr;
     PacketDesc *timestamp_packet;
-    const int64_t max_delay= av_rescale(ctx->max_delay, 90000, AV_TIME_BASE);
+    const int64_t max_delay= av_rescale(ctx->max_delay, 90000, av_get_time_base());
 
 retry:
     for(i=0; i<ctx->nb_streams; i++){
@@ -1077,7 +1077,7 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
     int preload;
     const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY);
 
-    preload = av_rescale(s->preload, 90000, AV_TIME_BASE);
+    preload = av_rescale(s->preload, 90000, av_get_time_base());
 
     pts= pkt->pts;
     dts= pkt->dts;
@@ -1085,13 +1085,13 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
     if (s->last_scr == AV_NOPTS_VALUE) {
         if (dts == AV_NOPTS_VALUE || (dts < preload && ctx->avoid_negative_ts) || s->is_dvd) {
             if (dts != AV_NOPTS_VALUE)
-                s->preload += av_rescale(-dts, AV_TIME_BASE, 90000);
+                s->preload += av_rescale(-dts, av_get_time_base(), 90000);
             s->last_scr = 0;
         } else {
             s->last_scr = dts - preload;
             s->preload = 0;
         }
-        preload = av_rescale(s->preload, 90000, AV_TIME_BASE);
+        preload = av_rescale(s->preload, 90000, av_get_time_base());
         av_log(ctx, AV_LOG_DEBUG, "First SCR: %"PRId64" First DTS: %"PRId64"\n", s->last_scr, dts + preload);
     }
 
diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c
index 1d51b97..9a75a94 100644
--- a/libavformat/mpegtsenc.c
+++ b/libavformat/mpegtsenc.c
@@ -657,7 +657,7 @@ static int mpegts_write_header(AVFormatContext *s)
             (TS_PACKET_SIZE * 8 * 1000);
 
         if(ts->copyts < 1)
-            ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, AV_TIME_BASE);
+            ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, av_get_time_base());
     } else {
         /* Arbitrary values, PAT/PMT will also be written on video key frames */
         ts->sdt_packet_period = 200;
@@ -862,7 +862,7 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
     int val, is_start, len, header_len, write_pcr, private_code, flags;
     int afc_len, stuffing_len;
     int64_t pcr = -1; /* avoid warning */
-    int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE);
+    int64_t delay = av_rescale(s->max_delay, 90000, av_get_time_base());
     int force_pat = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && key && !ts_st->prev_payload_key;
 
     is_start = 1;
@@ -1075,7 +1075,7 @@ static int mpegts_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
     uint8_t *data= NULL;
     MpegTSWrite *ts = s->priv_data;
     MpegTSWriteStream *ts_st = st->priv_data;
-    const int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE)*2;
+    const int64_t delay = av_rescale(s->max_delay, 90000, av_get_time_base())*2;
     int64_t dts = pkt->dts, pts = pkt->pts;
 
     if (ts->reemit_pat_pmt) {
diff --git a/libavformat/mux.c b/libavformat/mux.c
index f01b82b..0017dc8 100644
--- a/libavformat/mux.c
+++ b/libavformat/mux.c
@@ -606,7 +606,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
 
     if (chunked) {
-        uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
+        uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, av_get_time_base_q(), st->time_base, AV_ROUND_UP);
         st->interleaver_chunk_size     += pkt->size;
         st->interleaver_chunk_duration += pkt->duration;
         if (   (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
@@ -657,11 +657,11 @@ static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
     int comp      = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                                   st->time_base);
     if (s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))) {
-        int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
-        int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
+        int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, av_get_time_base_q()) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
+        int64_t ts2= av_rescale_q(next->dts, st2->time_base, av_get_time_base_q()) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
         if (ts == ts2) {
-            ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
-               -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
+            ts= ( pkt ->dts* st->time_base.num*av_get_time_base() - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
+               -( next->dts*st2->time_base.num*av_get_time_base() - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
             ts2=0;
         }
         comp= (ts>ts2) - (ts<ts2);
@@ -702,15 +702,15 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                 int64_t delta_dts =
                     av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
                                 s->streams[i]->time_base,
-                                AV_TIME_BASE_Q) -
+                                av_get_time_base_q()) -
                     av_rescale_q(s->packet_buffer->pkt.dts,
                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
-                                AV_TIME_BASE_Q);
+                                av_get_time_base_q());
                 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
             }
         }
         if (s->nb_streams == stream_count+noninterleaved_count &&
-           delta_dts_max > 20*AV_TIME_BASE) {
+           delta_dts_max > 20*av_get_time_base()) {
             av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
             flush = 1;
         }
diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
index ba9ff02..c503a96 100644
--- a/libavformat/nutdec.c
+++ b/libavformat/nutdec.c
@@ -581,7 +581,7 @@ static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr)
     }
 
     *ts = tmp / nut->time_base_count *
-          av_q2d(nut->time_base[tmp % nut->time_base_count]) * AV_TIME_BASE;
+          av_q2d(nut->time_base[tmp % nut->time_base_count]) * av_get_time_base();
 
     if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts)) < 0)
         return ret;
@@ -633,7 +633,7 @@ static int find_and_decode_index(NUTContext *nut)
     max_pts = ffio_read_varlen(bc);
     s->duration = av_rescale_q(max_pts / nut->time_base_count,
                                nut->time_base[max_pts % nut->time_base_count],
-                               AV_TIME_BASE_Q);
+                               av_get_time_base_q());
     s->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
 
     GET_V(syncpoint_count, tmp < INT_MAX / 8 && tmp > 0);
@@ -982,7 +982,7 @@ static int read_seek(AVFormatContext *s, int stream_index,
 {
     NUTContext *nut    = s->priv_data;
     AVStream *st       = s->streams[stream_index];
-    Syncpoint dummy    = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE };
+    Syncpoint dummy    = { .ts = pts * av_q2d(st->time_base) * av_get_time_base() };
     Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE };
     Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp };
     int64_t pos, pos2, ts;
diff --git a/libavformat/oggenc.c b/libavformat/oggenc.c
index d9ef23c..0792485 100644
--- a/libavformat/oggenc.c
+++ b/libavformat/oggenc.c
@@ -157,9 +157,9 @@ static int ogg_compare_granule(AVFormatContext *s, OGGPage *next, OGGPage *page)
         return 0;
 
     next_granule = av_rescale_q(ogg_granule_to_timestamp(st2->priv_data, next->granule),
-                                st2->time_base, AV_TIME_BASE_Q);
+                                st2->time_base, av_get_time_base_q());
     cur_granule  = av_rescale_q(ogg_granule_to_timestamp(st->priv_data, page->granule),
-                                st ->time_base, AV_TIME_BASE_Q);
+                                st ->time_base, av_get_time_base_q());
     return next_granule > cur_granule;
 }
 
@@ -253,9 +253,9 @@ static int ogg_buffer_data(AVFormatContext *s, AVStream *st,
             AVStream *st = s->streams[page->stream_index];
 
             int64_t start = av_rescale_q(page->start_granule, st->time_base,
-                                         AV_TIME_BASE_Q);
+                                         av_get_time_base_q());
             int64_t next  = av_rescale_q(page->granule, st->time_base,
-                                         AV_TIME_BASE_Q);
+                                         av_get_time_base_q());
 
             if (page->segments_count == 255 ||
                 (ogg->pref_size     > 0 && page->size   >= ogg->pref_size) ||
diff --git a/libavformat/options_table.h b/libavformat/options_table.h
index 8145325..d7168a9 100644
--- a/libavformat/options_table.h
+++ b/libavformat/options_table.h
@@ -50,7 +50,7 @@ static const AVOption avformat_options[] = {
 {"latm", "enable RTP MP4A-LATM payload", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_MP4A_LATM }, INT_MIN, INT_MAX, E, "fflags"},
 {"nobuffer", "reduce the latency introduced by optional buffering", 0, AV_OPT_TYPE_CONST, {.i64 = AVFMT_FLAG_NOBUFFER }, 0, INT_MAX, D, "fflags"},
 {"seek2any", "allow seeking to non-keyframes on demuxer level when supported", OFFSET(seek2any), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, D},
-{"analyzeduration", "specify how many microseconds are analyzed to probe the input", OFFSET(max_analyze_duration), AV_OPT_TYPE_INT, {.i64 = 5*AV_TIME_BASE }, 0, INT_MAX, D},
+{"analyzeduration", "specify how many microseconds are analyzed to probe the input", OFFSET(max_analyze_duration), AV_OPT_TYPE_INT, {.i64 = 5000000 }, 0, INT_MAX, D},
 {"cryptokey", "decryption key", OFFSET(key), AV_OPT_TYPE_BINARY, {.dbl = 0}, 0, 0, D},
 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), AV_OPT_TYPE_INT, {.i64 = 1<<20 }, 0, INT_MAX, D},
 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), AV_OPT_TYPE_INT, {.i64 = 3041280 }, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
diff --git a/libavformat/rmdec.c b/libavformat/rmdec.c
index 8feef54..0ed22e7 100644
--- a/libavformat/rmdec.c
+++ b/libavformat/rmdec.c
@@ -515,7 +515,7 @@ static int rm_read_header(AVFormatContext *s)
             avio_rb32(pb); /* avg packet size */
             avio_rb32(pb); /* nb packets */
             duration = avio_rb32(pb); /* duration */
-            s->duration = av_rescale(duration, AV_TIME_BASE, 1000);
+            s->duration = av_rescale(duration, av_get_time_base(), 1000);
             avio_rb32(pb); /* preroll */
             indx_off = avio_rb32(pb); /* index offset */
             data_off = avio_rb32(pb); /* data offset */
diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c
index ed118b0..7a377e3 100644
--- a/libavformat/rtpdec.c
+++ b/libavformat/rtpdec.c
@@ -322,7 +322,7 @@ int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, URLContext *fd,
     } else {
         uint32_t middle_32_bits   = s->last_rtcp_ntp_time >> 16; // this is valid, right? do we need to handle 64 bit values special?
         uint32_t delay_since_last = av_rescale(av_gettime() - s->last_rtcp_reception_time,
-                                               65536, AV_TIME_BASE);
+                                               65536, av_get_time_base());
 
         avio_wb32(pb, middle_32_bits); /* last SR timestamp */
         avio_wb32(pb, delay_since_last); /* delay since last SR */
@@ -768,7 +768,7 @@ static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
 
     if (s->st) {
         int64_t received = av_gettime();
-        uint32_t arrival_ts = av_rescale_q(received, AV_TIME_BASE_Q,
+        uint32_t arrival_ts = av_rescale_q(received, av_get_time_base_q(),
                                            s->st->time_base);
         timestamp = AV_RB32(buf + 4);
         // Calculate the jitter immediately, before queueing the packet
diff --git a/libavformat/rtpenc.c b/libavformat/rtpenc.c
index f010008..b174fe3 100644
--- a/libavformat/rtpenc.c
+++ b/libavformat/rtpenc.c
@@ -161,7 +161,7 @@ static int rtp_write_header(AVFormatContext *s1)
             } else {
                 s->max_frames_per_packet =
                         av_rescale_q_rnd(s1->max_delay,
-                                         AV_TIME_BASE_Q,
+                                         av_get_time_base_q(),
                                          (AVRational){ frame_size, st->codec->sample_rate },
                                          AV_ROUND_DOWN);
             }
diff --git a/libavformat/rtsp.h b/libavformat/rtsp.h
index 76c7f18..4da31b3 100644
--- a/libavformat/rtsp.h
+++ b/libavformat/rtsp.h
@@ -133,7 +133,7 @@ typedef struct RTSPMessageHeader {
     int nb_transports;
 
     /** Time range of the streams that the server will stream. In
-     * AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
+     * internal time base units, AV_NOPTS_VALUE if not used */
     int64_t range_start, range_end;
 
     /** describes the complete "Transport:" line of the server in response
diff --git a/libavformat/rtspdec.c b/libavformat/rtspdec.c
index 74a7bf6..aee0740 100644
--- a/libavformat/rtspdec.c
+++ b/libavformat/rtspdec.c
@@ -521,8 +521,8 @@ static int rtsp_read_play(AVFormatContext *s)
         } else {
             snprintf(cmd, sizeof(cmd),
                      "Range: npt=%"PRId64".%03"PRId64"-\r\n",
-                     rt->seek_timestamp / AV_TIME_BASE,
-                     rt->seek_timestamp / (AV_TIME_BASE / 1000) % 1000);
+                     rt->seek_timestamp / av_get_time_base(),
+                     rt->seek_timestamp / (av_get_time_base() / 1000) % 1000);
         }
         ff_rtsp_send_cmd(s, "PLAY", rt->control_uri, cmd, reply, NULL);
         if (reply->status_code != RTSP_STATUS_OK) {
@@ -538,7 +538,7 @@ static int rtsp_read_play(AVFormatContext *s)
                     continue;
                 st = s->streams[rtsp_st->stream_index];
                 rtpctx->range_start_offset =
-                    av_rescale_q(reply->range_start, AV_TIME_BASE_Q,
+                    av_rescale_q(reply->range_start, av_get_time_base_q(),
                                  st->time_base);
             }
         }
@@ -896,7 +896,7 @@ static int rtsp_read_seek(AVFormatContext *s, int stream_index,
 
     rt->seek_timestamp = av_rescale_q(timestamp,
                                       s->streams[stream_index]->time_base,
-                                      AV_TIME_BASE_Q);
+                                      av_get_time_base_q());
     switch(rt->state) {
     default:
     case RTSP_STATE_IDLE:
diff --git a/libavformat/sbgdec.c b/libavformat/sbgdec.c
index 36cd8a3..4084746 100644
--- a/libavformat/sbgdec.c
+++ b/libavformat/sbgdec.c
@@ -30,7 +30,7 @@
 
 #define SBG_SCALE (1 << 16)
 #define DAY (24 * 60 * 60)
-#define DAY_TS ((int64_t)DAY * AV_TIME_BASE)
+#define DAY_TS ((int64_t)DAY * av_get_time_base())
 
 struct sbg_demuxer {
     AVClass *class;
@@ -196,7 +196,7 @@ static int str_to_time(const char *str, int64_t *rtime)
         if (end > cur + 1)
             cur = end;
     }
-    *rtime = (hours * 3600 + minutes * 60 + seconds) * AV_TIME_BASE;
+    *rtime = (hours * 3600 + minutes * 60 + seconds) * av_get_time_base();
     return cur - str;
 }
 
@@ -376,7 +376,7 @@ static int parse_options(struct sbg_parser *p)
                                  "syntax error for option -F");
                         return AVERROR_INVALIDDATA;
                     }
-                    p->scs.opt_fade_time = v * AV_TIME_BASE / 1000;
+                    p->scs.opt_fade_time = v * av_get_time_base() / 1000;
                     break;
                 case 'L':
                     FORWARD_ERROR(parse_optarg(p, opt, &oarg));
@@ -802,7 +802,7 @@ static int parse_script(void *log, char *script, int script_len,
             /* default values */
             .start_ts      = AV_NOPTS_VALUE,
             .sample_rate   = 44100,
-            .opt_fade_time = 60 * AV_TIME_BASE,
+            .opt_fade_time = 60 * av_get_time_base(),
         },
     };
     int r;
@@ -917,7 +917,7 @@ static void expand_timestamps(void *log, struct sbg_script *s)
                    now0 % DAY;
         av_log(log, AV_LOG_INFO, "Using %02d:%02d:%02d as NOW.\n",
                (int)(now / 3600), (int)(now / 60) % 60, (int)now % 60);
-        now *= AV_TIME_BASE;
+        now *= av_get_time_base();
         for (i = 0; i < s->nb_tseq; i++) {
             if (s->tseq[i].ts.type == 'N') {
                 s->tseq[i].ts.t += now;
@@ -1298,9 +1298,9 @@ static int generate_intervals(void *log, struct sbg_script *s, int sample_rate,
     /* Convert timestamps */
     for (i = -1; i < s->nb_events; i++) {
         ev1 = i < 0 ? &ev0 : &s->events[i];
-        ev1->ts_int   = av_rescale(ev1->ts_int,   sample_rate, AV_TIME_BASE);
-        ev1->ts_trans = av_rescale(ev1->ts_trans, sample_rate, AV_TIME_BASE);
-        ev1->ts_next  = av_rescale(ev1->ts_next,  sample_rate, AV_TIME_BASE);
+        ev1->ts_int   = av_rescale(ev1->ts_int,   sample_rate, av_get_time_base());
+        ev1->ts_trans = av_rescale(ev1->ts_trans, sample_rate, av_get_time_base());
+        ev1->ts_next  = av_rescale(ev1->ts_next,  sample_rate, av_get_time_base());
     }
 
     /* Generate intervals */
@@ -1420,10 +1420,10 @@ static av_cold int sbg_read_header(AVFormatContext *avf)
     avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
     st->probe_packets = 0;
     st->start_time    = av_rescale(script.start_ts,
-                                   sbg->sample_rate, AV_TIME_BASE);
+                                   sbg->sample_rate, av_get_time_base());
     st->duration      = script.end_ts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                         av_rescale(script.end_ts - script.start_ts,
-                                   sbg->sample_rate, AV_TIME_BASE);
+                                   sbg->sample_rate, av_get_time_base());
     st->cur_dts       = st->start_time;
     r = encode_intervals(&script, st->codec, &inter);
     if (r < 0)
@@ -1466,7 +1466,7 @@ static int sbg_read_seek2(AVFormatContext *avf, int stream_index,
     if (flags || stream_index > 0)
         return AVERROR(EINVAL);
     if (stream_index < 0)
-        ts = av_rescale_q(ts, AV_TIME_BASE_Q, avf->streams[0]->time_base);
+        ts = av_rescale_q(ts, av_get_time_base_q(), avf->streams[0]->time_base);
     avf->streams[0]->cur_dts = ts;
     return 0;
 }
diff --git a/libavformat/seek-test.c b/libavformat/seek-test.c
index 8b0611d..7e88532 100644
--- a/libavformat/seek-test.c
+++ b/libavformat/seek-test.c
@@ -136,15 +136,15 @@ int main(int argc, char **argv)
         if(i>25) break;
 
         stream_id= (i>>1)%(ic->nb_streams+1) - 1;
-        timestamp= (i*19362894167LL) % (duration*AV_TIME_BASE) - AV_TIME_BASE;
+        timestamp= (i*19362894167LL) % (duration*av_get_time_base()) - av_get_time_base();
         if(stream_id>=0){
             st= ic->streams[stream_id];
-            timestamp= av_rescale_q(timestamp, AV_TIME_BASE_Q, st->time_base);
+            timestamp= av_rescale_q(timestamp, av_get_time_base_q(), st->time_base);
         }
         //FIXME fully test the new seek API
         if(i&1) ret = avformat_seek_file(ic, stream_id, INT64_MIN, timestamp, timestamp, 0);
         else    ret = avformat_seek_file(ic, stream_id, timestamp, timestamp, INT64_MAX, 0);
-        ts_str(ts_buf, timestamp, stream_id < 0 ? AV_TIME_BASE_Q : st->time_base);
+        ts_str(ts_buf, timestamp, stream_id < 0 ? av_get_time_base_q() : st->time_base);
         printf("ret:%-10s st:%2d flags:%d  ts:%s\n", ret_str(ret), stream_id, i&1, ts_buf);
     }
 
diff --git a/libavformat/seek.c b/libavformat/seek.c
index bb5ca87..d5c2f7b 100644
--- a/libavformat/seek.c
+++ b/libavformat/seek.c
@@ -273,9 +273,9 @@ int64_t ff_gen_syncpoint_search(AVFormatContext *s,
             st = s->streams[stream_index];
             time_base = st->time_base;
         } else {
-            // no reference stream, use AV_TIME_BASE as reference time base
+            // no reference stream, use av_get_time_base() as reference time base
             time_base.num = 1;
-            time_base.den = AV_TIME_BASE;
+            time_base.den = av_get_time_base();
         }
     }
 
diff --git a/libavformat/segment.c b/libavformat/segment.c
index 91c1432..0b97c1d 100644
--- a/libavformat/segment.c
+++ b/libavformat/segment.c
@@ -659,6 +659,7 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
     SegmentContext *seg = s->priv_data;
     AVFormatContext *oc = seg->avf;
     AVStream *st = s->streams[pkt->stream_index];
+    AVRational tb = av_get_time_base_q();
     int64_t end_pts = INT64_MAX, offset;
     int start_frame = INT_MAX;
     int ret;
@@ -683,7 +684,7 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
         (seg->frame_count >= start_frame ||
          (pkt->pts != AV_NOPTS_VALUE &&
           av_compare_ts(pkt->pts, st->time_base,
-                        end_pts-seg->time_delta, AV_TIME_BASE_Q) >= 0))) {
+                        end_pts-seg->time_delta, av_get_time_base_q()) >= 0))) {
         if ((ret = segment_end(s, seg->individual_header_trailer, 0)) < 0)
             goto fail;
 
@@ -694,7 +695,7 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
 
         seg->cur_entry.index = seg->segment_idx;
         seg->cur_entry.start_time = (double)pkt->pts * av_q2d(st->time_base);
-        seg->cur_entry.start_pts = av_rescale_q(pkt->pts, st->time_base, AV_TIME_BASE_Q);
+        seg->cur_entry.start_pts = av_rescale_q(pkt->pts, st->time_base, av_get_time_base_q());
     } else if (pkt->pts != AV_NOPTS_VALUE) {
         seg->cur_entry.end_time =
             FFMAX(seg->cur_entry.end_time, (double)(pkt->pts + pkt->duration) * av_q2d(st->time_base));
@@ -709,13 +710,13 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
 
     av_log(s, AV_LOG_DEBUG, "stream:%d start_pts_time:%s pts:%s pts_time:%s dts:%s dts_time:%s",
            pkt->stream_index,
-           av_ts2timestr(seg->cur_entry.start_pts, &AV_TIME_BASE_Q),
+           av_ts2timestr(seg->cur_entry.start_pts, &tb),
            av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base),
            av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &st->time_base));
 
     /* compute new timestamps */
     offset = av_rescale_q(seg->initial_offset - (seg->reset_timestamps ? seg->cur_entry.start_pts : 0),
-                          AV_TIME_BASE_Q, st->time_base);
+                          av_get_time_base_q(), st->time_base);
     if (pkt->pts != AV_NOPTS_VALUE)
         pkt->pts += offset;
     if (pkt->dts != AV_NOPTS_VALUE)
diff --git a/libavformat/smoothstreamingenc.c b/libavformat/smoothstreamingenc.c
index fe18a95..4aedc97 100644
--- a/libavformat/smoothstreamingenc.c
+++ b/libavformat/smoothstreamingenc.c
@@ -581,7 +581,7 @@ static int ism_write_packet(AVFormatContext *s, AVPacket *pkt)
 
     if ((!c->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
         av_compare_ts(pkt->dts - st->first_dts, st->time_base,
-                      end_dts, AV_TIME_BASE_Q) >= 0 &&
+                      end_dts, av_get_time_base_q()) >= 0 &&
         pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {
 
         if ((ret = ism_flush(s, 0)) < 0)
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 40d886f..3e3103a 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -791,7 +791,7 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
 
         /* TODO: audio: time filter; video: frame reordering (pts != dts) */
         if (s->use_wallclock_as_timestamps)
-            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
+            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), av_get_time_base_q(), st->time_base);
 
         if(!pktl && st->request_probe <= 0)
             return ret;
@@ -2029,8 +2029,8 @@ static int seek_frame_internal(AVFormatContext *s, int stream_index,
             return -1;
 
         st= s->streams[stream_index];
-        /* timestamp for default must be expressed in AV_TIME_BASE units */
-        timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+        /* timestamp for default must be expressed in av_get_time_base() units */
+        timestamp = av_rescale(timestamp, st->time_base.den, av_get_time_base() * (int64_t)st->time_base.num);
     }
 
     /* first, we try the format specific seek */
@@ -2093,12 +2093,12 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int
 
         if (stream_index == -1 && s->nb_streams == 1) {
             AVRational time_base = s->streams[0]->time_base;
-            ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
+            ts = av_rescale_q(ts, av_get_time_base_q(), time_base);
             min_ts = av_rescale_rnd(min_ts, time_base.den,
-                                    time_base.num * (int64_t)AV_TIME_BASE,
+                                    time_base.num * (int64_t)av_get_time_base(),
                                     AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
             max_ts = av_rescale_rnd(max_ts, time_base.den,
-                                    time_base.num * (int64_t)AV_TIME_BASE,
+                                    time_base.num * (int64_t)av_get_time_base(),
                                     AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
         }
 
@@ -2172,7 +2172,7 @@ static void update_stream_timings(AVFormatContext *ic)
     for(i = 0;i < ic->nb_streams; i++) {
         st = ic->streams[i];
         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
-            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+            start_time1= av_rescale_q(st->start_time, st->time_base, av_get_time_base_q());
             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
                 if (start_time1 < start_time_text)
                     start_time_text = start_time1;
@@ -2181,7 +2181,7 @@ static void update_stream_timings(AVFormatContext *ic)
             end_time1 = AV_NOPTS_VALUE;
             if (st->duration != AV_NOPTS_VALUE) {
                 end_time1 = start_time1
-                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
+                          + av_rescale_q(st->duration, st->time_base, av_get_time_base_q());
                 end_time = FFMAX(end_time, end_time1);
             }
             for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
@@ -2192,14 +2192,14 @@ static void update_stream_timings(AVFormatContext *ic)
             }
         }
         if (st->duration != AV_NOPTS_VALUE) {
-            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
+            duration1 = av_rescale_q(st->duration, st->time_base, av_get_time_base_q());
             duration = FFMAX(duration, duration1);
         }
     }
-    if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
+    if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < av_get_time_base()))
         start_time = start_time_text;
     else if(start_time > start_time_text)
-        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
+        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)av_get_time_base());
 
     if (start_time != INT64_MAX) {
         ic->start_time = start_time;
@@ -2219,7 +2219,7 @@ static void update_stream_timings(AVFormatContext *ic)
     }
         if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
             /* compute the bitrate */
-            double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
+            double bitrate = (double)filesize * 8.0 * av_get_time_base() /
                 (double)ic->duration;
             if (bitrate >= 0 && bitrate <= INT_MAX)
                 ic->bit_rate = bitrate;
@@ -2236,9 +2236,9 @@ static void fill_all_stream_timings(AVFormatContext *ic)
         st = ic->streams[i];
         if (st->start_time == AV_NOPTS_VALUE) {
             if(ic->start_time != AV_NOPTS_VALUE)
-                st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
+                st->start_time = av_rescale_q(ic->start_time, av_get_time_base_q(), st->time_base);
             if(ic->duration != AV_NOPTS_VALUE)
-                st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
+                st->duration = av_rescale_q(ic->duration, av_get_time_base_q(), st->time_base);
         }
     }
 }
@@ -2401,12 +2401,12 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
         for(i = 0;i < ic->nb_streams; i++) {
             st = ic->streams[i];
             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
-                    (double) st->start_time / AV_TIME_BASE,
-                    (double) st->duration   / AV_TIME_BASE);
+                    (double) st->start_time / av_get_time_base(),
+                    (double) st->duration   / av_get_time_base());
         }
         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
-                (double) ic->start_time / AV_TIME_BASE,
-                (double) ic->duration   / AV_TIME_BASE,
+                (double) ic->start_time / av_get_time_base(),
+                (double) ic->duration   / av_get_time_base(),
                 ic->bit_rate / 1000);
     }
 }
@@ -2635,7 +2635,7 @@ static void compute_chapters_end(AVFormatContext *s)
     for (i = 0; i < s->nb_chapters; i++)
         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
             AVChapter *ch = s->chapters[i];
-            int64_t   end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
+            int64_t   end = max_time ? av_rescale_q(max_time, av_get_time_base_q(), ch->time_base)
                                      : INT64_MAX;
 
             for (j = 0; j < s->nb_chapters; j++) {
@@ -3028,15 +3028,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
         if (st->codec_info_nb_frames>1) {
             int64_t t=0;
             if (st->time_base.den > 0)
-                t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
+                t = av_rescale_q(st->info->codec_info_duration, st->time_base, av_get_time_base_q());
             if (st->avg_frame_rate.num > 0)
-                t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
+                t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), av_get_time_base_q()));
 
             if (   t==0
                 && st->codec_info_nb_frames>30
                 && st->info->fps_first_dts != AV_NOPTS_VALUE
                 && st->info->fps_last_dts  != AV_NOPTS_VALUE)
-                t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
+                t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, av_get_time_base_q()));
 
             if (t >= ic->max_analyze_duration) {
                 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
@@ -3665,24 +3665,24 @@ void av_dump_format(AVFormatContext *ic,
         if (ic->duration != AV_NOPTS_VALUE) {
             int hours, mins, secs, us;
             int64_t duration = ic->duration + 5000;
-            secs = duration / AV_TIME_BASE;
-            us = duration % AV_TIME_BASE;
+            secs = duration / av_get_time_base();
+            us = duration % av_get_time_base();
             mins = secs / 60;
             secs %= 60;
             hours = mins / 60;
             mins %= 60;
             av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
-                   (100 * us) / AV_TIME_BASE);
+                   (100 * us) / av_get_time_base());
         } else {
             av_log(NULL, AV_LOG_INFO, "N/A");
         }
         if (ic->start_time != AV_NOPTS_VALUE) {
             int secs, us;
             av_log(NULL, AV_LOG_INFO, ", start: ");
-            secs = ic->start_time / AV_TIME_BASE;
-            us = abs(ic->start_time % AV_TIME_BASE);
+            secs = ic->start_time / av_get_time_base();
+            us = abs(ic->start_time % av_get_time_base());
             av_log(NULL, AV_LOG_INFO, "%d.%06d",
-                   secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
+                   secs, (int)av_rescale(us, 1000000, av_get_time_base()));
         }
         av_log(NULL, AV_LOG_INFO, ", bitrate: ");
         if (ic->bit_rate) {
diff --git a/libavutil/utils.c b/libavutil/utils.c
index 434c8b3..daf3c69 100644
--- a/libavutil/utils.c
+++ b/libavutil/utils.c
@@ -121,5 +121,7 @@ int av_get_time_base(void)
 
 AVRational av_get_time_base_q(void)
 {
-    return (AVRational){1, AV_TIME_BASE};
+    int den = av_get_time_base();
+
+    return (AVRational){1, den};
 }
diff --git a/tools/aviocat.c b/tools/aviocat.c
index e161d58..639cc6f 100644
--- a/tools/aviocat.c
+++ b/tools/aviocat.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
         stream_pos += n;
         if (bps) {
             avio_flush(output);
-            while ((av_gettime() - start_time) * bps / AV_TIME_BASE < stream_pos)
+            while ((av_gettime() - start_time) * bps / av_get_time_base() < stream_pos)
                 av_usleep(50 * 1000);
         }
     }
diff --git a/tools/ismindex.c b/tools/ismindex.c
index 4dc3e12..5de0073 100644
--- a/tools/ismindex.c
+++ b/tools/ismindex.c
@@ -337,7 +337,7 @@ static int handle_file(struct Tracks *tracks, const char *file, int split)
         }
 
         tracks->duration = FFMAX(tracks->duration,
-                                 av_rescale_rnd(track->duration, AV_TIME_BASE,
+                                 av_rescale_rnd(track->duration, av_get_time_base(),
                                                 track->timescale, AV_ROUND_UP));
 
         if (track->is_audio) {
-- 
1.8.5.2



More information about the ffmpeg-devel mailing list