FFmpeg
mediacodecdec_common.c
Go to the documentation of this file.
1 /*
2  * Android MediaCodec decoder
3  *
4  * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <string.h>
24 #include <sys/types.h>
25 
26 #include "libavutil/common.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/log.h"
30 #include "libavutil/pixfmt.h"
31 #include "libavutil/time.h"
32 #include "libavutil/timestamp.h"
33 
34 #include "avcodec.h"
35 #include "internal.h"
36 
37 #include "mediacodec.h"
38 #include "mediacodec_surface.h"
39 #include "mediacodec_sw_buffer.h"
40 #include "mediacodec_wrapper.h"
41 #include "mediacodecdec_common.h"
42 
43 /**
44  * OMX.k3.video.decoder.avc, OMX.NVIDIA.* OMX.SEC.avc.dec and OMX.google
45  * codec workarounds used in various place are taken from the Gstreamer
46  * project.
47  *
48  * Gstreamer references:
49  * https://cgit.freedesktop.org/gstreamer/gst-plugins-bad/tree/sys/androidmedia/
50  *
51  * Gstreamer copyright notice:
52  *
53  * Copyright (C) 2012, Collabora Ltd.
54  * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
55  *
56  * Copyright (C) 2012, Rafaël Carré <funman@videolanorg>
57  *
58  * Copyright (C) 2015, Sebastian Dröge <sebastian@centricular.com>
59  *
60  * Copyright (C) 2014-2015, Collabora Ltd.
61  * Author: Matthieu Bouron <matthieu.bouron@gcollabora.com>
62  *
63  * Copyright (C) 2015, Edward Hervey
64  * Author: Edward Hervey <bilboed@gmail.com>
65  *
66  * Copyright (C) 2015, Matthew Waters <matthew@centricular.com>
67  *
68  * This library is free software; you can redistribute it and/or
69  * modify it under the terms of the GNU Lesser General Public
70  * License as published by the Free Software Foundation
71  * version 2.1 of the License.
72  *
73  * This library is distributed in the hope that it will be useful,
74  * but WITHOUT ANY WARRANTY; without even the implied warranty of
75  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
76  * Lesser General Public License for more details.
77  *
78  * You should have received a copy of the GNU Lesser General Public
79  * License along with this library; if not, write to the Free Software
80  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
81  *
82  */
83 
84 #define INPUT_DEQUEUE_TIMEOUT_US 8000
85 #define OUTPUT_DEQUEUE_TIMEOUT_US 8000
86 #define OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US 1000000
87 
/* Android MediaCodec COLOR_Format* constants (COLOR_FormatYUV420Planar,
 * COLOR_QCOM_FormatYUV420SemiPlanar, ...). The enumerator list itself was
 * elided by the documentation generator; see the full source for values. */
88 enum {
98 };
99 
/* Lookup table mapping MediaCodec color-format integers to FFmpeg pixel
 * formats; terminated by a zero entry. The two struct field declarations
 * (an int color_format and an enum AVPixelFormat pix_fmt, judging by the
 * initializers below and the uses in mcdec_map_color_format()) were elided
 * by the documentation generator. */
100 static const struct {
101 
104 
105 } color_formats[] = {
106 
109  { COLOR_QCOM_FormatYUV420SemiPlanar, AV_PIX_FMT_NV12 },
110  { COLOR_QCOM_FormatYUV420SemiPlanar32m, AV_PIX_FMT_NV12 },
112  { COLOR_TI_FormatYUV420PackedSemiPlanar, AV_PIX_FMT_NV12 },
114  { 0 }
115 };
116 
/* Map a MediaCodec color-format integer to an AVPixelFormat.
 *
 * When a hardware surface is configured, always returns
 * AV_PIX_FMT_MEDIACODEC; otherwise the value is looked up in the
 * color_formats[] table above.  The leading parameters (avctx, s) and
 * the declaration of `ret` are elided in this extract.
 * NOTE(review): `ret` is presumably initialized to AV_PIX_FMT_NONE on the
 * elided declaration line — the caller in mediacodec_dec_parse_format()
 * checks for exactly that value; confirm against the full source. */
119  int color_format)
120 {
121  int i;
123 
124  if (s->surface) {
125  return AV_PIX_FMT_MEDIACODEC;
126  }
127 
    /* OMX.k3 workaround (see Gstreamer notice above); the body of this
     * special case was elided by the documentation generator. */
128  if (!strcmp(s->codec_name, "OMX.k3.video.decoder.avc") && color_format == COLOR_FormatYCbYCr) {
130  }
131 
132  for (i = 0; i < FF_ARRAY_ELEMS(color_formats); i++) {
133  if (color_formats[i].color_format == color_format) {
134  return color_formats[i].pix_fmt;
135  }
136  }
137 
138  av_log(avctx, AV_LOG_ERROR, "Output color format 0x%x (value=%d) is not supported\n",
139  color_format, color_format);
140 
141  return ret;
142 }
143 
/* Take a reference on the decoder context (signature elided:
 * static void ff_mediacodec_dec_ref(MediaCodecDecContext *s), per the
 * cross-reference index).  Paired with ff_mediacodec_dec_unref(). */
145 {
146  atomic_fetch_add(&s->refcount, 1);
147 }
148 
/* Drop a reference on the decoder context; when the last reference is
 * released, tear down the codec, the cached output MediaFormat and the
 * surface, then free the context itself.  NULL-safe.
 * The elided lines inside each branch presumably call the matching
 * destructors (ff_AMediaCodec_delete, ff_AMediaFormat_delete,
 * ff_mediacodec_surface_unref) before the pointer is cleared — confirm
 * against the full source. */
150 {
151  if (!s)
152  return;
153 
154  if (atomic_fetch_sub(&s->refcount, 1) == 1) {
155  if (s->codec) {
157  s->codec = NULL;
158  }
159 
160  if (s->format) {
162  s->format = NULL;
163  }
164 
165  if (s->surface) {
167  s->surface = NULL;
168  }
169 
170  av_freep(&s->codec_name);
171  av_freep(&s);
172  }
173 }
174 
/* AVBuffer free() callback for hardware-wrapped output frames
 * (installed by mediacodec_wrap_hw_buffer() via av_buffer_create()).
 *
 * If the user never rendered/released the MediaCodec buffer and it still
 * belongs to the current codec generation (serial matches, or flushes are
 * delayed), return it to the codec without rendering.  The line elided
 * after the delay_flush check presumably drops the context reference taken
 * when the buffer was wrapped (ff_mediacodec_dec_unref) — confirm against
 * the full source. */
175 static void mediacodec_buffer_release(void *opaque, uint8_t *data)
176 {
177  AVMediaCodecBuffer *buffer = opaque;
178  MediaCodecDecContext *ctx = buffer->ctx;
179  int released = atomic_load(&buffer->released);
180 
181  if (!released && (ctx->delay_flush || buffer->serial == atomic_load(&ctx->serial))) {
183  av_log(ctx->avctx, AV_LOG_DEBUG,
184  "Releasing output buffer %zd (%p) ts=%"PRId64" on free() [%d pending]\n",
185  buffer->index, buffer, buffer->pts, atomic_load(&ctx->hw_buffer_count));
186  ff_AMediaCodec_releaseOutputBuffer(ctx->codec, buffer->index, 0);
187  }
188 
189  if (ctx->delay_flush)
191  av_freep(&buffer);
192 }
193 
/* Wrap a dequeued MediaCodec output buffer into an AVFrame for the
 * hardware (surface) path: no pixel copy is made; the frame carries an
 * AVMediaCodecBuffer in data[3] plus a zero-sized AVBufferRef whose free
 * callback (mediacodec_buffer_release) returns the buffer to the codec.
 * Leading parameters (avctx, s) and several argument lines of the
 * av_buffer_create() call are elided in this extract.
 *
 * PTS handling: presentationTimeUs is rescaled from microseconds into
 * pkt_timebase when one is set.
 *
 * On failure the MediaCodec buffer is released back to the codec and a
 * negative AVERROR code is returned. */
196  ssize_t index,
198  AVFrame *frame)
199 {
200  int ret = 0;
201  int status = 0;
202  AVMediaCodecBuffer *buffer = NULL;
203 
204  frame->buf[0] = NULL;
205  frame->width = avctx->width;
206  frame->height = avctx->height;
207  frame->format = avctx->pix_fmt;
209 
210  if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
211  frame->pts = av_rescale_q(info->presentationTimeUs,
213  avctx->pkt_timebase);
214  } else {
215  frame->pts = info->presentationTimeUs;
216  }
217 #if FF_API_PKT_PTS
219  frame->pkt_pts = frame->pts;
221 #endif
222  frame->pkt_dts = AV_NOPTS_VALUE;
223 
224  buffer = av_mallocz(sizeof(AVMediaCodecBuffer));
225  if (!buffer) {
226  ret = AVERROR(ENOMEM);
227  goto fail;
228  }
229 
230  atomic_init(&buffer->released, 0);
231 
232  frame->buf[0] = av_buffer_create(NULL,
233  0,
235  buffer,
237 
238  if (!frame->buf[0]) {
239  ret = AVERROR(ENOMEM);
240  goto fail;
241 
242  }
243 
244  buffer->ctx = s;
245  buffer->serial = atomic_load(&s->serial);
246  if (s->delay_flush)
248 
249  buffer->index = index;
250  buffer->pts = info->presentationTimeUs;
251 
252  frame->data[3] = (uint8_t *)buffer;
253 
255  av_log(avctx, AV_LOG_DEBUG,
256  "Wrapping output buffer %zd (%p) ts=%"PRId64" [%d pending]\n",
257  buffer->index, buffer, buffer->pts, atomic_load(&s->hw_buffer_count));
258 
259  return 0;
260 fail:
    /* NOTE(review): av_freep() expects a pointer to the pointer; passing
     * `buffer` directly looks wrong — it would interpret the struct's first
     * bytes as a pointer to free, and crashes if buffer is still NULL
     * (the ENOMEM path above).  This likely should be av_freep(&buffer);
     * confirm against the full/current source. */
261  av_freep(buffer);
262  av_buffer_unref(&frame->buf[0]);
263  status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
264  if (status < 0) {
265  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
266  ret = AVERROR_EXTERNAL;
267  }
268 
269  return ret;
270 }
271 
/* Copy a dequeued MediaCodec output buffer into a freshly allocated,
 * refcounted AVFrame (software path).  A copy is mandatory because a
 * codec flush invalidates all MediaCodec input/output buffers.
 * The leading parameters (avctx, s) and the `case COLOR_...:` labels of
 * the switch were elided in this extract; each visible call dispatches to
 * the matching ff_mediacodec_sw_buffer_copy_* helper for the codec's
 * reported color format.
 *
 * The MediaCodec buffer is always released back to the codec before
 * returning, on both success and failure paths.  Returns 0 on success or
 * a negative AVERROR code. */
274  uint8_t *data,
275  size_t size,
276  ssize_t index,
278  AVFrame *frame)
279 {
280  int ret = 0;
281  int status = 0;
282 
283  frame->width = avctx->width;
284  frame->height = avctx->height;
285  frame->format = avctx->pix_fmt;
286 
287  /* MediaCodec buffers needs to be copied to our own refcounted buffers
288  * because the flush command invalidates all input and output buffers.
289  */
290  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
291  av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer\n");
292  goto done;
293  }
294 
295  /* Override frame->pkt_pts as ff_get_buffer will override its value based
296  * on the last avpacket received which is not in sync with the frame:
297  * * N avpackets can be pushed before 1 frame is actually returned
298  * * 0-sized avpackets are pushed to flush remaining frames at EOS */
299  if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
300  frame->pts = av_rescale_q(info->presentationTimeUs,
302  avctx->pkt_timebase);
303  } else {
304  frame->pts = info->presentationTimeUs;
305  }
306 #if FF_API_PKT_PTS
308  frame->pkt_pts = frame->pts;
310 #endif
311  frame->pkt_dts = AV_NOPTS_VALUE;
312 
313  av_log(avctx, AV_LOG_TRACE,
314  "Frame: width=%d stride=%d height=%d slice-height=%d "
315  "crop-top=%d crop-bottom=%d crop-left=%d crop-right=%d encoder=%s "
316  "destination linesizes=%d,%d,%d\n" ,
317  avctx->width, s->stride, avctx->height, s->slice_height,
319  frame->linesize[0], frame->linesize[1], frame->linesize[2]);
320 
321  switch (s->color_format) {
323  ff_mediacodec_sw_buffer_copy_yuv420_planar(avctx, s, data, size, info, frame);
324  break;
328  ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(avctx, s, data, size, info, frame);
329  break;
332  ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(avctx, s, data, size, info, frame);
333  break;
336  break;
337  default:
338  av_log(avctx, AV_LOG_ERROR, "Unsupported color format 0x%x (value=%d)\n",
339  s->color_format, s->color_format);
340  ret = AVERROR(EINVAL);
341  goto done;
342  }
343 
344  ret = 0;
345 done:
346  status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
347  if (status < 0) {
348  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
349  ret = AVERROR_EXTERNAL;
350  }
351 
352  return ret;
353 }
354 
/* Fetch an int32 field `key` from s->format into `name`.  If the field is
 * absent and `mandatory` is non-zero, log an error using the surrounding
 * function's `format` string, set `ret` to AVERROR_EXTERNAL and jump to
 * its `fail` label — i.e. this macro relies on locals of its expansion
 * site (s, avctx, format, ret, fail).  Only usable inside
 * mediacodec_dec_parse_format()-style functions. */
355 #define AMEDIAFORMAT_GET_INT32(name, key, mandatory) do { \
356  int32_t value = 0; \
357  if (ff_AMediaFormat_getInt32(s->format, key, &value)) { \
358  (name) = value; \
359  } else if (mandatory) { \
360  av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", key, format); \
361  ret = AVERROR_EXTERNAL; \
362  goto fail; \
363  } \
364 } while (0) \
365 
/* Parse the codec's output MediaFormat (s->format) and propagate it to the
 * AVCodecContext: dimensions, stride/slice-height (with per-vendor OMX
 * workarounds), pixel format, crop rectangle and sample aspect ratio.
 * Signature elided: static int mediacodec_dec_parse_format(AVCodecContext
 * *avctx, MediaCodecDecContext *s), per the cross-reference index.
 * Returns 0 (via ff_set_dimensions) on success, negative AVERROR on
 * failure. */
367 {
368  int ret = 0;
369  int width = 0;
370  int height = 0;
371  char *format = NULL;
372 
373  if (!s->format) {
374  av_log(avctx, AV_LOG_ERROR, "Output MediaFormat is not set\n");
375  return AVERROR(EINVAL);
376  }
377 
378  format = ff_AMediaFormat_toString(s->format);
379  if (!format) {
380  return AVERROR_EXTERNAL;
381  }
382  av_log(avctx, AV_LOG_DEBUG, "Parsing MediaFormat %s\n", format);
383 
384  /* Mandatory fields */
385  AMEDIAFORMAT_GET_INT32(s->width, "width", 1);
386  AMEDIAFORMAT_GET_INT32(s->height, "height", 1);
387 
388  AMEDIAFORMAT_GET_INT32(s->stride, "stride", 0);
389  s->stride = s->stride > 0 ? s->stride : s->width;
390 
391  AMEDIAFORMAT_GET_INT32(s->slice_height, "slice-height", 0);
392 
    /* Vendor workarounds (see the Gstreamer notice at the top of the file):
     * Nvidia aligns slice height to 16, Samsung's avc decoder ignores the
     * reported stride/slice-height entirely. */
393  if (strstr(s->codec_name, "OMX.Nvidia.") && s->slice_height == 0) {
394  s->slice_height = FFALIGN(s->height, 16);
395  } else if (strstr(s->codec_name, "OMX.SEC.avc.dec")) {
396  s->slice_height = avctx->height;
397  s->stride = avctx->width;
398  } else if (s->slice_height == 0) {
399  s->slice_height = s->height;
400  }
401 
402  AMEDIAFORMAT_GET_INT32(s->color_format, "color-format", 1);
403  avctx->pix_fmt = mcdec_map_color_format(avctx, s, s->color_format);
404  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
405  av_log(avctx, AV_LOG_ERROR, "Output color format is not supported\n");
406  ret = AVERROR(EINVAL);
407  goto fail;
408  }
409 
410  /* Optional fields */
411  AMEDIAFORMAT_GET_INT32(s->crop_top, "crop-top", 0);
412  AMEDIAFORMAT_GET_INT32(s->crop_bottom, "crop-bottom", 0);
413  AMEDIAFORMAT_GET_INT32(s->crop_left, "crop-left", 0);
414  AMEDIAFORMAT_GET_INT32(s->crop_right, "crop-right", 0);
415 
    /* Crop bounds are inclusive, hence the +1. */
416  width = s->crop_right + 1 - s->crop_left;
417  height = s->crop_bottom + 1 - s->crop_top;
418 
419  AMEDIAFORMAT_GET_INT32(s->display_width, "display-width", 0);
420  AMEDIAFORMAT_GET_INT32(s->display_height, "display-height", 0);
421 
422  if (s->display_width && s->display_height) {
423  AVRational sar = av_div_q(
425  (AVRational){ width, height });
426  ff_set_sar(avctx, sar);
427  }
428 
429  av_log(avctx, AV_LOG_INFO,
430  "Output crop parameters top=%d bottom=%d left=%d right=%d, "
431  "resulting dimensions width=%d height=%d\n",
432  s->crop_top, s->crop_bottom, s->crop_left, s->crop_right,
433  width, height);
434 
435  av_freep(&format);
436  return ff_set_dimensions(avctx, width, height);
437 fail:
438  av_freep(&format);
439  return ret;
440 }
441 
/* Reset decoder state and flush the underlying MediaCodec instance.
 * Bumping s->serial invalidates all outstanding wrapped output buffers
 * (see mediacodec_buffer_release's serial check).  The elided line after
 * the serial bump presumably resets the hardware-buffer bookkeeping —
 * confirm against the full source.  Returns 0 or AVERROR_EXTERNAL. */
443 {
444  FFAMediaCodec *codec = s->codec;
445  int status;
446 
447  s->output_buffer_count = 0;
448 
449  s->draining = 0;
450  s->flushing = 0;
451  s->eos = 0;
452  atomic_fetch_add(&s->serial, 1);
454  s->current_input_buffer = -1;
455 
456  status = ff_AMediaCodec_flush(codec);
457  if (status < 0) {
458  av_log(avctx, AV_LOG_ERROR, "Failed to flush codec\n");
459  return AVERROR_EXTERNAL;
460  }
461 
462  return 0;
463 }
464 
/* Initialize a MediaCodec decoder context for the given MIME type and
 * input MediaFormat: negotiate the pixel format (grabbing an output
 * surface from hw_device_ctx or the legacy hwaccel_context when
 * AV_PIX_FMT_MEDIACODEC is selected), pick a decoder by name/profile,
 * then configure and start it.  On any failure the context is torn down
 * via ff_mediacodec_dec_close() and a negative AVERROR is returned.
 * Several lines are elided in this extract (pix_fmts entries, the
 * profile query, codec creation and the output-format fetch). */
466  const char *mime, FFAMediaFormat *format)
467 {
468  int ret = 0;
469  int status;
470  int profile;
471 
472  enum AVPixelFormat pix_fmt;
473  static const enum AVPixelFormat pix_fmts[] = {
476  };
477 
478  s->avctx = avctx;
479  atomic_init(&s->refcount, 1);
481  atomic_init(&s->serial, 1);
482  s->current_input_buffer = -1;
483 
484  pix_fmt = ff_get_format(avctx, pix_fmts);
485  if (pix_fmt == AV_PIX_FMT_MEDIACODEC) {
486  AVMediaCodecContext *user_ctx = avctx->hwaccel_context;
487 
    /* Prefer the surface from the new hw_device_ctx API... */
488  if (avctx->hw_device_ctx) {
489  AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
490  if (device_ctx->type == AV_HWDEVICE_TYPE_MEDIACODEC) {
491  if (device_ctx->hwctx) {
492  AVMediaCodecDeviceContext *mediacodec_ctx = (AVMediaCodecDeviceContext *)device_ctx->hwctx;
493  s->surface = ff_mediacodec_surface_ref(mediacodec_ctx->surface, avctx);
494  av_log(avctx, AV_LOG_INFO, "Using surface %p\n", s->surface);
495  }
496  }
497  }
498 
    /* ...falling back to the legacy hwaccel_context surface. */
499  if (!s->surface && user_ctx && user_ctx->surface) {
500  s->surface = ff_mediacodec_surface_ref(user_ctx->surface, avctx);
501  av_log(avctx, AV_LOG_INFO, "Using surface %p\n", s->surface);
502  }
503  }
504 
    /* Elided: profile = ff_AMediaCodecProfile_getProfileFromAVCodecContext(...),
     * per the cross-reference index — confirm against the full source. */
506  if (profile < 0) {
507  av_log(avctx, AV_LOG_WARNING, "Unsupported or unknown profile\n");
508  }
509 
510  s->codec_name = ff_AMediaCodecList_getCodecNameByType(mime, profile, 0, avctx);
511  if (!s->codec_name) {
512  ret = AVERROR_EXTERNAL;
513  goto fail;
514  }
515 
516  av_log(avctx, AV_LOG_DEBUG, "Found decoder %s\n", s->codec_name);
518  if (!s->codec) {
519  av_log(avctx, AV_LOG_ERROR, "Failed to create media decoder for type %s and name %s\n", mime, s->codec_name);
520  ret = AVERROR_EXTERNAL;
521  goto fail;
522  }
523 
524  status = ff_AMediaCodec_configure(s->codec, format, s->surface, NULL, 0);
525  if (status < 0) {
526  char *desc = ff_AMediaFormat_toString(format);
527  av_log(avctx, AV_LOG_ERROR,
528  "Failed to configure codec %s (status = %d) with format %s\n",
529  s->codec_name, status, desc);
530  av_freep(&desc);
531 
532  ret = AVERROR_EXTERNAL;
533  goto fail;
534  }
535 
536  status = ff_AMediaCodec_start(s->codec);
537  if (status < 0) {
538  char *desc = ff_AMediaFormat_toString(format);
539  av_log(avctx, AV_LOG_ERROR,
540  "Failed to start codec %s (status = %d) with format %s\n",
541  s->codec_name, status, desc);
542  av_freep(&desc);
543  ret = AVERROR_EXTERNAL;
544  goto fail;
545  }
546 
548  if (s->format) {
549  if ((ret = mediacodec_dec_parse_format(avctx, s)) < 0) {
550  av_log(avctx, AV_LOG_ERROR,
551  "Failed to configure context\n");
552  goto fail;
553  }
554  }
555 
556  av_log(avctx, AV_LOG_DEBUG, "MediaCodec %p started successfully\n", s->codec);
557 
558  return 0;
559 
560 fail:
561  av_log(avctx, AV_LOG_ERROR, "MediaCodec %p failed to start\n", s->codec);
562  ff_mediacodec_dec_close(avctx, s);
563  return ret;
564 }
565 
/* Feed one AVPacket into the codec's input buffers.
 *
 * A zero-sized packet triggers draining: an empty buffer is queued with
 * the end-of-stream flag (the flag computation line is elided here).
 * Packet PTS is rescaled from pkt_timebase to microseconds as MediaCodec
 * expects.  Large packets are split across multiple input buffers.
 *
 * Returns the number of bytes consumed, AVERROR(EAGAIN) when no input
 * buffer is available, AVERROR_EOF after EOS, or AVERROR_EXTERNAL on
 * codec errors.  When !wait, the input dequeue does not block. */
567  AVPacket *pkt, bool wait)
568 {
569  int offset = 0;
570  int need_draining = 0;
571  uint8_t *data;
572  size_t size;
573  FFAMediaCodec *codec = s->codec;
574  int status;
575  int64_t input_dequeue_timeout_us = wait ? INPUT_DEQUEUE_TIMEOUT_US : 0;
576  int64_t pts;
577 
578  if (s->flushing) {
579  av_log(avctx, AV_LOG_ERROR, "Decoder is flushing and cannot accept new buffer "
580  "until all output buffers have been released\n");
581  return AVERROR_EXTERNAL;
582  }
583 
584  if (pkt->size == 0) {
585  need_draining = 1;
586  }
587 
588  if (s->draining && s->eos) {
589  return AVERROR_EOF;
590  }
591 
592  while (offset < pkt->size || (need_draining && !s->draining)) {
    /* Reuse an input buffer dequeued on a previous, aborted attempt. */
593  ssize_t index = s->current_input_buffer;
594  if (index < 0) {
595  index = ff_AMediaCodec_dequeueInputBuffer(codec, input_dequeue_timeout_us);
596  if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
597  av_log(avctx, AV_LOG_TRACE, "No input buffer available, try again later\n");
598  break;
599  }
600 
601  if (index < 0) {
602  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue input buffer (status=%zd)\n", index);
603  return AVERROR_EXTERNAL;
604  }
605  }
606  s->current_input_buffer = -1;
607 
608  data = ff_AMediaCodec_getInputBuffer(codec, index, &size);
609  if (!data) {
610  av_log(avctx, AV_LOG_ERROR, "Failed to get input buffer\n");
611  return AVERROR_EXTERNAL;
612  }
613 
614  pts = pkt->pts;
615  if (pts == AV_NOPTS_VALUE) {
616  av_log(avctx, AV_LOG_WARNING, "Input packet is missing PTS\n");
617  pts = 0;
618  }
619  if (pts && avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
620  pts = av_rescale_q(pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
621  }
622 
623  if (need_draining) {
    /* Elided: the EOS flags computation (presumably
     * ff_AMediaCodec_getBufferFlagEndOfStream(codec)) — confirm
     * against the full source. */
625 
626  av_log(avctx, AV_LOG_DEBUG, "Sending End Of Stream signal\n");
627 
628  status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, 0, pts, flags);
629  if (status < 0) {
630  av_log(avctx, AV_LOG_ERROR, "Failed to queue input empty buffer (status = %d)\n", status);
631  return AVERROR_EXTERNAL;
632  }
633 
634  av_log(avctx, AV_LOG_TRACE,
635  "Queued empty EOS input buffer %zd with flags=%d\n", index, flags);
636 
637  s->draining = 1;
638  return 0;
639  }
640 
641  size = FFMIN(pkt->size - offset, size);
642  memcpy(data, pkt->data + offset, size);
643  offset += size;
644 
645  status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, size, pts, 0);
646  if (status < 0) {
647  av_log(avctx, AV_LOG_ERROR, "Failed to queue input buffer (status = %d)\n", status);
648  return AVERROR_EXTERNAL;
649  }
650 
651  av_log(avctx, AV_LOG_TRACE,
652  "Queued input buffer %zd size=%zd ts=%"PRIi64"\n", index, size, pts);
653  }
654 
655  if (offset == 0)
656  return AVERROR(EAGAIN);
657  return offset;
658 }
659 
/* Try to dequeue one decoded frame from the codec's output buffers.
 *
 * Timeout policy: block long while draining (to collect the last frames),
 * don't block at all before the first frame or when !wait, otherwise use
 * the short default timeout.  Dequeue results are dispatched to the
 * hardware (surface) or software wrap helpers; format-change and
 * buffers-changed notifications are handled inline.  The declaration of
 * the `info` buffer-info struct and the output-format fetch line are
 * elided in this extract.
 *
 * Returns 0 with a filled frame, AVERROR(EAGAIN) when no frame is ready,
 * AVERROR_EOF at end of stream, or AVERROR_EXTERNAL on codec errors. */
661  AVFrame *frame, bool wait)
662 {
663  int ret;
664  uint8_t *data;
665  ssize_t index;
666  size_t size;
667  FFAMediaCodec *codec = s->codec;
669  int status;
670  int64_t output_dequeue_timeout_us = OUTPUT_DEQUEUE_TIMEOUT_US;
671 
672  if (s->draining && s->eos) {
673  return AVERROR_EOF;
674  }
675 
676  if (s->draining) {
677  /* If the codec is flushing or need to be flushed, block for a fair
678  * amount of time to ensure we got a frame */
679  output_dequeue_timeout_us = OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US;
680  } else if (s->output_buffer_count == 0 || !wait) {
681  /* If the codec hasn't produced any frames, do not block so we
682  * can push data to it as fast as possible, and get the first
683  * frame */
684  output_dequeue_timeout_us = 0;
685  }
686 
687  index = ff_AMediaCodec_dequeueOutputBuffer(codec, &info, output_dequeue_timeout_us);
688  if (index >= 0) {
689  av_log(avctx, AV_LOG_TRACE, "Got output buffer %zd"
690  " offset=%" PRIi32 " size=%" PRIi32 " ts=%" PRIi64
691  " flags=%" PRIu32 "\n", index, info.offset, info.size,
692  info.presentationTimeUs, info.flags);
693 
    /* Elided: the EOS-flag test guarding this assignment — confirm
     * against the full source. */
695  s->eos = 1;
696  }
697 
698  if (info.size) {
699  if (s->surface) {
700  if ((ret = mediacodec_wrap_hw_buffer(avctx, s, index, &info, frame)) < 0) {
701  av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
702  return ret;
703  }
704  } else {
705  data = ff_AMediaCodec_getOutputBuffer(codec, index, &size);
706  if (!data) {
707  av_log(avctx, AV_LOG_ERROR, "Failed to get output buffer\n");
708  return AVERROR_EXTERNAL;
709  }
710 
711  if ((ret = mediacodec_wrap_sw_buffer(avctx, s, data, size, index, &info, frame)) < 0) {
712  av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
713  return ret;
714  }
715  }
716 
717  s->output_buffer_count++;
718  return 0;
719  } else {
    /* Zero-sized output (e.g. a pure EOS buffer): hand it straight back. */
720  status = ff_AMediaCodec_releaseOutputBuffer(codec, index, 0);
721  if (status < 0) {
722  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
723  }
724  }
725 
726  } else if (ff_AMediaCodec_infoOutputFormatChanged(codec, index)) {
727  char *format = NULL;
728 
729  if (s->format) {
730  status = ff_AMediaFormat_delete(s->format);
731  if (status < 0) {
732  av_log(avctx, AV_LOG_ERROR, "Failed to delete MediaFormat %p\n", s->format);
733  }
734  }
735 
    /* Elided: s->format = ff_AMediaCodec_getOutputFormat(codec) or
     * similar — confirm against the full source. */
737  if (!s->format) {
738  av_log(avctx, AV_LOG_ERROR, "Failed to get output format\n");
739  return AVERROR_EXTERNAL;
740  }
741 
742  format = ff_AMediaFormat_toString(s->format);
743  if (!format) {
744  return AVERROR_EXTERNAL;
745  }
746  av_log(avctx, AV_LOG_INFO, "Output MediaFormat changed to %s\n", format);
747  av_freep(&format);
748 
749  if ((ret = mediacodec_dec_parse_format(avctx, s)) < 0) {
750  return ret;
751  }
752 
753  } else if (ff_AMediaCodec_infoOutputBuffersChanged(codec, index)) {
755  } else if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
756  if (s->draining) {
757  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer within %" PRIi64 "ms "
758  "while draining remaining frames, output will probably lack frames\n",
759  output_dequeue_timeout_us / 1000);
760  } else {
761  av_log(avctx, AV_LOG_TRACE, "No output buffer available, try again later\n");
762  }
763  } else {
764  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer (status=%zd)\n", index);
765  return AVERROR_EXTERNAL;
766  }
767 
768  return AVERROR(EAGAIN);
769 }
770 
771 /*
772 * ff_mediacodec_dec_flush returns 0 if the flush cannot be performed on
773 * the codec (because the user retains frames). The codec stays in the
774 * flushing state.
775 *
776 * ff_mediacodec_dec_flush returns 1 if the flush can actually be
777 * performed on the codec. The codec leaves the flushing state and can
778 * process packets again.
779 *
780 * ff_mediacodec_dec_flush returns a negative value if an error has
781 * occurred.
782 */
784 {
785  if (!s->surface || atomic_load(&s->refcount) == 1) {
786  int ret;
787 
788  /* No frames (holding a reference to the codec) are retained by the
789  * user, thus we can flush the codec and returns accordingly */
790  if ((ret = mediacodec_dec_flush_codec(avctx, s)) < 0) {
791  return ret;
792  }
793 
794  return 1;
795  }
796 
797  s->flushing = 1;
798  return 0;
799 }
800 
/* Close the decoder context.  The elided body line presumably drops the
 * init-time reference via ff_mediacodec_dec_unref(s) (actual teardown
 * happens when the last frame reference is gone) — confirm against the
 * full source. */
802 {
804 
805  return 0;
806 }
807 
/* Report whether the decoder is in the deferred-flush state set by
 * ff_mediacodec_dec_flush() (signature elided in this extract). */
809 {
810  return s->flushing;
811 }
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:60
This structure holds a reference to a android/view/Surface object that will be used as output by the ...
Definition: mediacodec.h:33
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1370
#define AMEDIAFORMAT_GET_INT32(name, key, mandatory)
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
char * ff_AMediaCodecList_getCodecNameByType(const char *mime, int profile, int encoder, void *log_ctx)
Memory handling functions.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
const char * desc
Definition: nvenc.c:68
int num
Numerator.
Definition: rational.h:59
int size
Definition: avcodec.h:1481
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1947
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
void ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static AVPacket pkt
void ff_mediacodec_sw_buffer_copy_yuv420_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
The code handling the various YUV color formats is taken from the GStreamer project.
int ff_mediacodec_dec_send(AVCodecContext *avctx, MediaCodecDecContext *s, AVPacket *pkt, bool wait)
int ff_mediacodec_dec_close(AVCodecContext *avctx, MediaCodecDecContext *s)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
int ff_mediacodec_dec_is_flushing(AVCodecContext *avctx, MediaCodecDecContext *s)
int ff_AMediaCodec_flush(FFAMediaCodec *codec)
int ff_AMediaCodec_releaseOutputBuffer(FFAMediaCodec *codec, size_t idx, int render)
uint8_t
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:2744
timestamp utils, mostly useful for debugging/logging purposes
static const struct @120 color_formats[]
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
int ff_AMediaCodec_infoOutputBuffersChanged(FFAMediaCodec *codec, ssize_t idx)
FFANativeWindow * ff_mediacodec_surface_ref(void *surface, void *log_ctx)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
Definition: hwcontext.h:91
void * surface
android/view/Surface handle, to be filled by the user.
#define height
int ff_AMediaCodec_infoOutputFormatChanged(FFAMediaCodec *codec, ssize_t idx)
uint8_t * data
Definition: avcodec.h:1480
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
char * ff_AMediaFormat_toString(FFAMediaFormat *format)
ptrdiff_t size
Definition: opengl_enc.c:100
int color_format
static void ff_mediacodec_dec_unref(MediaCodecDecContext *s)
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3122
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define atomic_load(object)
Definition: stdatomic.h:93
int ff_mediacodec_dec_flush(AVCodecContext *avctx, MediaCodecDecContext *s)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
uint8_t * ff_AMediaCodec_getInputBuffer(FFAMediaCodec *codec, size_t idx, size_t *out_size)
int ff_mediacodec_surface_unref(FFANativeWindow *window, void *log_ctx)
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
#define atomic_fetch_sub(object, operand)
Definition: stdatomic.h:137
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:78
#define fail()
Definition: checkasm.h:122
int ff_mediacodec_dec_receive(AVCodecContext *avctx, MediaCodecDecContext *s, AVFrame *frame, bool wait)
void ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
MIPS optimizations info
Definition: mips.txt:2
int ff_AMediaCodec_queueInputBuffer(FFAMediaCodec *codec, size_t idx, off_t offset, size_t size, uint64_t time, uint32_t flags)
#define FFMIN(a, b)
Definition: common.h:96
ssize_t ff_AMediaCodec_dequeueOutputBuffer(FFAMediaCodec *codec, FFAMediaCodecBufferInfo *info, int64_t timeoutUs)
#define width
int width
picture width / height.
Definition: avcodec.h:1741
void ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar_64x32Tile2m8ka(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
static int mediacodec_wrap_sw_buffer(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, ssize_t index, FFAMediaCodecBufferInfo *info, AVFrame *frame)
int ff_AMediaCodec_delete(FFAMediaCodec *codec)
AVFormatContext * ctx
Definition: movenc.c:48
int ff_AMediaCodec_getBufferFlagEndOfStream(FFAMediaCodec *codec)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_ARRAY_ELEMS(a)
ssize_t ff_AMediaCodec_dequeueInputBuffer(FFAMediaCodec *codec, int64_t timeoutUs)
FFAMediaCodec * ff_AMediaCodec_createCodecByName(const char *name)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
#define INPUT_DEQUEUE_TIMEOUT_US
OMX.k3.video.decoder.avc, OMX.NVIDIA.
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int ff_AMediaCodec_cleanOutputBuffers(FFAMediaCodec *codec)
int ff_AMediaCodec_start(FFAMediaCodec *codec)
Libavcodec external API header.
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
main external API structure.
Definition: avcodec.h:1568
uint8_t * data
The data buffer.
Definition: buffer.h:89
int ff_AMediaCodecProfile_getProfileFromAVCodecContext(AVCodecContext *avctx)
The following API around MediaCodec and MediaFormat is based on the NDK one provided by Google since ...
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
int index
Definition: gxfenc.c:89
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static void mediacodec_buffer_release(void *opaque, uint8_t *data)
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
int ff_AMediaCodec_infoTryAgainLater(FFAMediaCodec *codec, ssize_t idx)
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
int ff_AMediaFormat_delete(FFAMediaFormat *format)
mfxU16 profile
Definition: qsvenc.c:44
#define OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US
hardware decoding through MediaCodec
Definition: pixfmt.h:293
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * ff_AMediaCodec_getOutputBuffer(FFAMediaCodec *codec, size_t idx, size_t *out_size)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:396
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:404
int ff_AMediaCodec_configure(FFAMediaCodec *codec, const FFAMediaFormat *format, void *surface, void *crypto, uint32_t flags)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
static void ff_mediacodec_dec_ref(MediaCodecDecContext *s)
int den
Denominator.
Definition: rational.h:60
int ff_mediacodec_dec_init(AVCodecContext *avctx, MediaCodecDecContext *s, const char *mime, FFAMediaFormat *format)
static enum AVPixelFormat mcdec_map_color_format(AVCodecContext *avctx, MediaCodecDecContext *s, int color_format)
pixel format definitions
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
static int mediacodec_dec_parse_format(AVCodecContext *avctx, MediaCodecDecContext *s)
#define OUTPUT_DEQUEUE_TIMEOUT_US
FFAMediaFormat * ff_AMediaCodec_getOutputFormat(FFAMediaCodec *codec)
void * surface
android/view/Surface object reference.
Definition: mediacodec.h:38
int height
Definition: frame.h:353
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
static int mediacodec_wrap_hw_buffer(AVCodecContext *avctx, MediaCodecDecContext *s, ssize_t index, FFAMediaCodecBufferInfo *info, AVFrame *frame)
enum AVPixelFormat pix_fmt
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3317
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1457
static int mediacodec_dec_flush_codec(AVCodecContext *avctx, MediaCodecDecContext *s)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1473
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
GLuint buffer
Definition: opengl_enc.c:101