FFmpeg
mediacodecdec_common.c
Go to the documentation of this file.
1 /*
2  * Android MediaCodec decoder
3  *
4  * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <string.h>
24 #include <sys/types.h>
25 
26 #include "libavutil/common.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/log.h"
30 #include "libavutil/pixfmt.h"
31 #include "libavutil/time.h"
32 #include "libavutil/timestamp.h"
33 
34 #include "avcodec.h"
35 #include "internal.h"
36 
37 #include "mediacodec.h"
38 #include "mediacodec_surface.h"
39 #include "mediacodec_sw_buffer.h"
40 #include "mediacodec_wrapper.h"
41 #include "mediacodecdec_common.h"
42 
43 /**
44  * OMX.k3.video.decoder.avc, OMX.NVIDIA.*, OMX.SEC.avc.dec and OMX.google
45  * codec workarounds used in various places are taken from the GStreamer
46  * project.
47  *
48  * Gstreamer references:
49  * https://cgit.freedesktop.org/gstreamer/gst-plugins-bad/tree/sys/androidmedia/
50  *
51  * Gstreamer copyright notice:
52  *
53  * Copyright (C) 2012, Collabora Ltd.
54  * Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
55  *
56  * Copyright (C) 2012, Rafaël Carré <funman@videolan.org>
57  *
58  * Copyright (C) 2015, Sebastian Dröge <sebastian@centricular.com>
59  *
60  * Copyright (C) 2014-2015, Collabora Ltd.
61  * Author: Matthieu Bouron <matthieu.bouron@collabora.com>
62  *
63  * Copyright (C) 2015, Edward Hervey
64  * Author: Edward Hervey <bilboed@gmail.com>
65  *
66  * Copyright (C) 2015, Matthew Waters <matthew@centricular.com>
67  *
68  * This library is free software; you can redistribute it and/or
69  * modify it under the terms of the GNU Lesser General Public
70  * License as published by the Free Software Foundation
71  * version 2.1 of the License.
72  *
73  * This library is distributed in the hope that it will be useful,
74  * but WITHOUT ANY WARRANTY; without even the implied warranty of
75  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
76  * Lesser General Public License for more details.
77  *
78  * You should have received a copy of the GNU Lesser General Public
79  * License along with this library; if not, write to the Free Software
80  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
81  *
82  */
83 
84 #define INPUT_DEQUEUE_TIMEOUT_US 8000
85 #define OUTPUT_DEQUEUE_TIMEOUT_US 8000
86 #define OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US 1000000
87 
88 enum {
98 };
99 
100 static const struct {
101 
104 
105 } color_formats[] = {
106 
109  { COLOR_QCOM_FormatYUV420SemiPlanar, AV_PIX_FMT_NV12 },
110  { COLOR_QCOM_FormatYUV420SemiPlanar32m, AV_PIX_FMT_NV12 },
112  { COLOR_TI_FormatYUV420PackedSemiPlanar, AV_PIX_FMT_NV12 },
114  { 0 }
115 };
116 
119  int color_format)
120 {
121  int i;
123 
124  if (s->surface) {
125  return AV_PIX_FMT_MEDIACODEC;
126  }
127 
128  if (!strcmp(s->codec_name, "OMX.k3.video.decoder.avc") && color_format == COLOR_FormatYCbYCr) {
130  }
131 
132  for (i = 0; i < FF_ARRAY_ELEMS(color_formats); i++) {
133  if (color_formats[i].color_format == color_format) {
134  return color_formats[i].pix_fmt;
135  }
136  }
137 
138  av_log(avctx, AV_LOG_ERROR, "Output color format 0x%x (value=%d) is not supported\n",
139  color_format, color_format);
140 
141  return ret;
142 }
143 
145 {
146  atomic_fetch_add(&s->refcount, 1);
147 }
148 
150 {
151  if (!s)
152  return;
153 
154  if (atomic_fetch_sub(&s->refcount, 1) == 1) {
155  if (s->codec) {
157  s->codec = NULL;
158  }
159 
160  if (s->format) {
162  s->format = NULL;
163  }
164 
165  if (s->surface) {
167  s->surface = NULL;
168  }
169 
170  av_freep(&s->codec_name);
171  av_freep(&s);
172  }
173 }
174 
175 static void mediacodec_buffer_release(void *opaque, uint8_t *data)
176 {
177  AVMediaCodecBuffer *buffer = opaque;
178  MediaCodecDecContext *ctx = buffer->ctx;
179  int released = atomic_load(&buffer->released);
180 
181  if (!released && (ctx->delay_flush || buffer->serial == atomic_load(&ctx->serial))) {
183  av_log(ctx->avctx, AV_LOG_DEBUG,
184  "Releasing output buffer %zd (%p) ts=%"PRId64" on free() [%d pending]\n",
185  buffer->index, buffer, buffer->pts, atomic_load(&ctx->hw_buffer_count));
186  ff_AMediaCodec_releaseOutputBuffer(ctx->codec, buffer->index, 0);
187  }
188 
189  if (ctx->delay_flush)
191  av_freep(&buffer);
192 }
193 
196  ssize_t index,
198  AVFrame *frame)
199 {
200  int ret = 0;
201  int status = 0;
202  AVMediaCodecBuffer *buffer = NULL;
203 
204  frame->buf[0] = NULL;
205  frame->width = avctx->width;
206  frame->height = avctx->height;
207  frame->format = avctx->pix_fmt;
209 
210  if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
211  frame->pts = av_rescale_q(info->presentationTimeUs,
213  avctx->pkt_timebase);
214  } else {
215  frame->pts = info->presentationTimeUs;
216  }
217 #if FF_API_PKT_PTS
219  frame->pkt_pts = frame->pts;
221 #endif
222  frame->pkt_dts = AV_NOPTS_VALUE;
223 
224  buffer = av_mallocz(sizeof(AVMediaCodecBuffer));
225  if (!buffer) {
226  ret = AVERROR(ENOMEM);
227  goto fail;
228  }
229 
230  atomic_init(&buffer->released, 0);
231 
232  frame->buf[0] = av_buffer_create(NULL,
233  0,
235  buffer,
237 
238  if (!frame->buf[0]) {
239  ret = AVERROR(ENOMEM);
240  goto fail;
241 
242  }
243 
244  buffer->ctx = s;
245  buffer->serial = atomic_load(&s->serial);
246  if (s->delay_flush)
248 
249  buffer->index = index;
250  buffer->pts = info->presentationTimeUs;
251 
252  frame->data[3] = (uint8_t *)buffer;
253 
255  av_log(avctx, AV_LOG_DEBUG,
256  "Wrapping output buffer %zd (%p) ts=%"PRId64" [%d pending]\n",
257  buffer->index, buffer, buffer->pts, atomic_load(&s->hw_buffer_count));
258 
259  return 0;
260 fail:
261  av_freep(buffer);
262  av_buffer_unref(&frame->buf[0]);
263  status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
264  if (status < 0) {
265  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
266  ret = AVERROR_EXTERNAL;
267  }
268 
269  return ret;
270 }
271 
274  uint8_t *data,
275  size_t size,
276  ssize_t index,
278  AVFrame *frame)
279 {
280  int ret = 0;
281  int status = 0;
282 
283  frame->width = avctx->width;
284  frame->height = avctx->height;
285  frame->format = avctx->pix_fmt;
286 
287  /* MediaCodec buffers needs to be copied to our own refcounted buffers
288  * because the flush command invalidates all input and output buffers.
289  */
290  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
291  av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer\n");
292  goto done;
293  }
294 
295  /* Override frame->pkt_pts as ff_get_buffer will override its value based
296  * on the last avpacket received which is not in sync with the frame:
297  * * N avpackets can be pushed before 1 frame is actually returned
298  * * 0-sized avpackets are pushed to flush remaining frames at EOS */
299  if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
300  frame->pts = av_rescale_q(info->presentationTimeUs,
302  avctx->pkt_timebase);
303  } else {
304  frame->pts = info->presentationTimeUs;
305  }
306 #if FF_API_PKT_PTS
308  frame->pkt_pts = frame->pts;
310 #endif
311  frame->pkt_dts = AV_NOPTS_VALUE;
312 
313  av_log(avctx, AV_LOG_TRACE,
314  "Frame: width=%d stride=%d height=%d slice-height=%d "
315  "crop-top=%d crop-bottom=%d crop-left=%d crop-right=%d encoder=%s "
316  "destination linesizes=%d,%d,%d\n" ,
317  avctx->width, s->stride, avctx->height, s->slice_height,
319  frame->linesize[0], frame->linesize[1], frame->linesize[2]);
320 
321  switch (s->color_format) {
323  ff_mediacodec_sw_buffer_copy_yuv420_planar(avctx, s, data, size, info, frame);
324  break;
328  ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(avctx, s, data, size, info, frame);
329  break;
332  ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(avctx, s, data, size, info, frame);
333  break;
336  break;
337  default:
338  av_log(avctx, AV_LOG_ERROR, "Unsupported color format 0x%x (value=%d)\n",
339  s->color_format, s->color_format);
340  ret = AVERROR(EINVAL);
341  goto done;
342  }
343 
344  ret = 0;
345 done:
346  status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
347  if (status < 0) {
348  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
349  ret = AVERROR_EXTERNAL;
350  }
351 
352  return ret;
353 }
354 
355 #define AMEDIAFORMAT_GET_INT32(name, key, mandatory) do { \
356  int32_t value = 0; \
357  if (ff_AMediaFormat_getInt32(s->format, key, &value)) { \
358  (name) = value; \
359  } else if (mandatory) { \
360  av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", key, format); \
361  ret = AVERROR_EXTERNAL; \
362  goto fail; \
363  } \
364 } while (0) \
365 
367 {
368  int ret = 0;
369  int width = 0;
370  int height = 0;
371  char *format = NULL;
372 
373  if (!s->format) {
374  av_log(avctx, AV_LOG_ERROR, "Output MediaFormat is not set\n");
375  return AVERROR(EINVAL);
376  }
377 
378  format = ff_AMediaFormat_toString(s->format);
379  if (!format) {
380  return AVERROR_EXTERNAL;
381  }
382  av_log(avctx, AV_LOG_DEBUG, "Parsing MediaFormat %s\n", format);
383 
384  /* Mandatory fields */
385  AMEDIAFORMAT_GET_INT32(s->width, "width", 1);
386  AMEDIAFORMAT_GET_INT32(s->height, "height", 1);
387 
388  AMEDIAFORMAT_GET_INT32(s->stride, "stride", 0);
389  s->stride = s->stride > 0 ? s->stride : s->width;
390 
391  AMEDIAFORMAT_GET_INT32(s->slice_height, "slice-height", 0);
392 
393  if (strstr(s->codec_name, "OMX.Nvidia.") && s->slice_height == 0) {
394  s->slice_height = FFALIGN(s->height, 16);
395  } else if (strstr(s->codec_name, "OMX.SEC.avc.dec")) {
396  s->slice_height = avctx->height;
397  s->stride = avctx->width;
398  } else if (s->slice_height == 0) {
399  s->slice_height = s->height;
400  }
401 
402  AMEDIAFORMAT_GET_INT32(s->color_format, "color-format", 1);
403  avctx->pix_fmt = mcdec_map_color_format(avctx, s, s->color_format);
404  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
405  av_log(avctx, AV_LOG_ERROR, "Output color format is not supported\n");
406  ret = AVERROR(EINVAL);
407  goto fail;
408  }
409 
410  /* Optional fields */
411  AMEDIAFORMAT_GET_INT32(s->crop_top, "crop-top", 0);
412  AMEDIAFORMAT_GET_INT32(s->crop_bottom, "crop-bottom", 0);
413  AMEDIAFORMAT_GET_INT32(s->crop_left, "crop-left", 0);
414  AMEDIAFORMAT_GET_INT32(s->crop_right, "crop-right", 0);
415 
416  width = s->crop_right + 1 - s->crop_left;
417  height = s->crop_bottom + 1 - s->crop_top;
418 
419  AMEDIAFORMAT_GET_INT32(s->display_width, "display-width", 0);
420  AMEDIAFORMAT_GET_INT32(s->display_height, "display-height", 0);
421 
422  if (s->display_width && s->display_height) {
423  AVRational sar = av_div_q(
425  (AVRational){ width, height });
426  ff_set_sar(avctx, sar);
427  }
428 
429  av_log(avctx, AV_LOG_INFO,
430  "Output crop parameters top=%d bottom=%d left=%d right=%d, "
431  "resulting dimensions width=%d height=%d\n",
432  s->crop_top, s->crop_bottom, s->crop_left, s->crop_right,
433  width, height);
434 
435  av_freep(&format);
436  return ff_set_dimensions(avctx, width, height);
437 fail:
438  av_freep(&format);
439  return ret;
440 }
441 
443 {
444  FFAMediaCodec *codec = s->codec;
445  int status;
446 
447  s->output_buffer_count = 0;
448 
449  s->draining = 0;
450  s->flushing = 0;
451  s->eos = 0;
452  atomic_fetch_add(&s->serial, 1);
454  s->current_input_buffer = -1;
455 
456  status = ff_AMediaCodec_flush(codec);
457  if (status < 0) {
458  av_log(avctx, AV_LOG_ERROR, "Failed to flush codec\n");
459  return AVERROR_EXTERNAL;
460  }
461 
462  return 0;
463 }
464 
466  const char *mime, FFAMediaFormat *format)
467 {
468  int ret = 0;
469  int status;
470  int profile;
471 
472  enum AVPixelFormat pix_fmt;
473  static const enum AVPixelFormat pix_fmts[] = {
476  };
477 
478  s->avctx = avctx;
479  atomic_init(&s->refcount, 1);
481  atomic_init(&s->serial, 1);
482  s->current_input_buffer = -1;
483 
484  pix_fmt = ff_get_format(avctx, pix_fmts);
485  if (pix_fmt == AV_PIX_FMT_MEDIACODEC) {
486  AVMediaCodecContext *user_ctx = avctx->hwaccel_context;
487 
488  if (avctx->hw_device_ctx) {
489  AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
490  if (device_ctx->type == AV_HWDEVICE_TYPE_MEDIACODEC) {
491  if (device_ctx->hwctx) {
492  AVMediaCodecDeviceContext *mediacodec_ctx = (AVMediaCodecDeviceContext *)device_ctx->hwctx;
493  s->surface = ff_mediacodec_surface_ref(mediacodec_ctx->surface, avctx);
494  av_log(avctx, AV_LOG_INFO, "Using surface %p\n", s->surface);
495  }
496  }
497  }
498 
499  if (!s->surface && user_ctx && user_ctx->surface) {
500  s->surface = ff_mediacodec_surface_ref(user_ctx->surface, avctx);
501  av_log(avctx, AV_LOG_INFO, "Using surface %p\n", s->surface);
502  }
503  }
504 
506  if (profile < 0) {
507  av_log(avctx, AV_LOG_WARNING, "Unsupported or unknown profile\n");
508  }
509 
510  s->codec_name = ff_AMediaCodecList_getCodecNameByType(mime, profile, 0, avctx);
511  if (!s->codec_name) {
512  ret = AVERROR_EXTERNAL;
513  goto fail;
514  }
515 
516  av_log(avctx, AV_LOG_DEBUG, "Found decoder %s\n", s->codec_name);
518  if (!s->codec) {
519  av_log(avctx, AV_LOG_ERROR, "Failed to create media decoder for type %s and name %s\n", mime, s->codec_name);
520  ret = AVERROR_EXTERNAL;
521  goto fail;
522  }
523 
524  status = ff_AMediaCodec_configure(s->codec, format, s->surface, NULL, 0);
525  if (status < 0) {
526  char *desc = ff_AMediaFormat_toString(format);
527  av_log(avctx, AV_LOG_ERROR,
528  "Failed to configure codec (status = %d) with format %s\n",
529  status, desc);
530  av_freep(&desc);
531 
532  ret = AVERROR_EXTERNAL;
533  goto fail;
534  }
535 
536  status = ff_AMediaCodec_start(s->codec);
537  if (status < 0) {
538  char *desc = ff_AMediaFormat_toString(format);
539  av_log(avctx, AV_LOG_ERROR,
540  "Failed to start codec (status = %d) with format %s\n",
541  status, desc);
542  av_freep(&desc);
543  ret = AVERROR_EXTERNAL;
544  goto fail;
545  }
546 
548  if (s->format) {
549  if ((ret = mediacodec_dec_parse_format(avctx, s)) < 0) {
550  av_log(avctx, AV_LOG_ERROR,
551  "Failed to configure context\n");
552  goto fail;
553  }
554  }
555 
556  av_log(avctx, AV_LOG_DEBUG, "MediaCodec %p started successfully\n", s->codec);
557 
558  return 0;
559 
560 fail:
561  av_log(avctx, AV_LOG_ERROR, "MediaCodec %p failed to start\n", s->codec);
562  ff_mediacodec_dec_close(avctx, s);
563  return ret;
564 }
565 
567  AVPacket *pkt, bool wait)
568 {
569  int offset = 0;
570  int need_draining = 0;
571  uint8_t *data;
572  ssize_t index = s->current_input_buffer;
573  size_t size;
574  FFAMediaCodec *codec = s->codec;
575  int status;
576  int64_t input_dequeue_timeout_us = wait ? INPUT_DEQUEUE_TIMEOUT_US : 0;
577  int64_t pts;
578 
579  if (s->flushing) {
580  av_log(avctx, AV_LOG_ERROR, "Decoder is flushing and cannot accept new buffer "
581  "until all output buffers have been released\n");
582  return AVERROR_EXTERNAL;
583  }
584 
585  if (pkt->size == 0) {
586  need_draining = 1;
587  }
588 
589  if (s->draining && s->eos) {
590  return AVERROR_EOF;
591  }
592 
593  while (offset < pkt->size || (need_draining && !s->draining)) {
594  if (index < 0) {
595  index = ff_AMediaCodec_dequeueInputBuffer(codec, input_dequeue_timeout_us);
596  if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
597  av_log(avctx, AV_LOG_TRACE, "No input buffer available, try again later\n");
598  break;
599  }
600 
601  if (index < 0) {
602  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue input buffer (status=%zd)\n", index);
603  return AVERROR_EXTERNAL;
604  }
605  }
606  s->current_input_buffer = -1;
607 
608  data = ff_AMediaCodec_getInputBuffer(codec, index, &size);
609  if (!data) {
610  av_log(avctx, AV_LOG_ERROR, "Failed to get input buffer\n");
611  return AVERROR_EXTERNAL;
612  }
613 
614  pts = pkt->pts;
615  if (pts != AV_NOPTS_VALUE && avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
616  pts = av_rescale_q(pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
617  }
618 
619  if (need_draining) {
621 
622  av_log(avctx, AV_LOG_DEBUG, "Sending End Of Stream signal\n");
623 
624  status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, 0, pts, flags);
625  if (status < 0) {
626  av_log(avctx, AV_LOG_ERROR, "Failed to queue input empty buffer (status = %d)\n", status);
627  return AVERROR_EXTERNAL;
628  }
629 
630  av_log(avctx, AV_LOG_TRACE,
631  "Queued input buffer %zd size=%zd ts=%"PRIi64"\n", index, size, pts);
632 
633  s->draining = 1;
634  return 0;
635  }
636 
637  size = FFMIN(pkt->size - offset, size);
638  memcpy(data, pkt->data + offset, size);
639  offset += size;
640 
641  status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, size, pts, 0);
642  if (status < 0) {
643  av_log(avctx, AV_LOG_ERROR, "Failed to queue input buffer (status = %d)\n", status);
644  return AVERROR_EXTERNAL;
645  }
646 
647  av_log(avctx, AV_LOG_TRACE,
648  "Queued input buffer %zd size=%zd ts=%"PRIi64"\n", index, size, pts);
649  }
650 
651  if (offset == 0)
652  return AVERROR(EAGAIN);
653  return offset;
654 }
655 
657  AVFrame *frame, bool wait)
658 {
659  int ret;
660  uint8_t *data;
661  ssize_t index;
662  size_t size;
663  FFAMediaCodec *codec = s->codec;
665  int status;
666  int64_t output_dequeue_timeout_us = OUTPUT_DEQUEUE_TIMEOUT_US;
667 
668  if (s->draining && s->eos) {
669  return AVERROR_EOF;
670  }
671 
672  if (s->draining) {
673  /* If the codec is flushing or need to be flushed, block for a fair
674  * amount of time to ensure we got a frame */
675  output_dequeue_timeout_us = OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US;
676  } else if (s->output_buffer_count == 0 || !wait) {
677  /* If the codec hasn't produced any frames, do not block so we
678  * can push data to it as fast as possible, and get the first
679  * frame */
680  output_dequeue_timeout_us = 0;
681  }
682 
683  index = ff_AMediaCodec_dequeueOutputBuffer(codec, &info, output_dequeue_timeout_us);
684  if (index >= 0) {
685  av_log(avctx, AV_LOG_TRACE, "Got output buffer %zd"
686  " offset=%" PRIi32 " size=%" PRIi32 " ts=%" PRIi64
687  " flags=%" PRIu32 "\n", index, info.offset, info.size,
688  info.presentationTimeUs, info.flags);
689 
691  s->eos = 1;
692  }
693 
694  if (info.size) {
695  if (s->surface) {
696  if ((ret = mediacodec_wrap_hw_buffer(avctx, s, index, &info, frame)) < 0) {
697  av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
698  return ret;
699  }
700  } else {
701  data = ff_AMediaCodec_getOutputBuffer(codec, index, &size);
702  if (!data) {
703  av_log(avctx, AV_LOG_ERROR, "Failed to get output buffer\n");
704  return AVERROR_EXTERNAL;
705  }
706 
707  if ((ret = mediacodec_wrap_sw_buffer(avctx, s, data, size, index, &info, frame)) < 0) {
708  av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
709  return ret;
710  }
711  }
712 
713  s->output_buffer_count++;
714  return 0;
715  } else {
716  status = ff_AMediaCodec_releaseOutputBuffer(codec, index, 0);
717  if (status < 0) {
718  av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
719  }
720  }
721 
722  } else if (ff_AMediaCodec_infoOutputFormatChanged(codec, index)) {
723  char *format = NULL;
724 
725  if (s->format) {
726  status = ff_AMediaFormat_delete(s->format);
727  if (status < 0) {
728  av_log(avctx, AV_LOG_ERROR, "Failed to delete MediaFormat %p\n", s->format);
729  }
730  }
731 
733  if (!s->format) {
734  av_log(avctx, AV_LOG_ERROR, "Failed to get output format\n");
735  return AVERROR_EXTERNAL;
736  }
737 
738  format = ff_AMediaFormat_toString(s->format);
739  if (!format) {
740  return AVERROR_EXTERNAL;
741  }
742  av_log(avctx, AV_LOG_INFO, "Output MediaFormat changed to %s\n", format);
743  av_freep(&format);
744 
745  if ((ret = mediacodec_dec_parse_format(avctx, s)) < 0) {
746  return ret;
747  }
748 
749  } else if (ff_AMediaCodec_infoOutputBuffersChanged(codec, index)) {
751  } else if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
752  if (s->draining) {
753  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer within %" PRIi64 "ms "
754  "while draining remaining frames, output will probably lack frames\n",
755  output_dequeue_timeout_us / 1000);
756  } else {
757  av_log(avctx, AV_LOG_TRACE, "No output buffer available, try again later\n");
758  }
759  } else {
760  av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer (status=%zd)\n", index);
761  return AVERROR_EXTERNAL;
762  }
763 
764  return AVERROR(EAGAIN);
765 }
766 
767 /*
768 * ff_mediacodec_dec_flush returns 0 if the flush cannot be performed on
769 * the codec (because the user retains frames). The codec stays in the
770 * flushing state.
771 *
772 * ff_mediacodec_dec_flush returns 1 if the flush can actually be
773 * performed on the codec. The codec leaves the flushing state and can
774 * process again packets.
775 *
776 * ff_mediacodec_dec_flush returns a negative value if an error has
777 * occurred.
778 */
780 {
781  if (!s->surface || atomic_load(&s->refcount) == 1) {
782  int ret;
783 
784  /* No frames (holding a reference to the codec) are retained by the
785  * user, thus we can flush the codec and returns accordingly */
786  if ((ret = mediacodec_dec_flush_codec(avctx, s)) < 0) {
787  return ret;
788  }
789 
790  return 1;
791  }
792 
793  s->flushing = 1;
794  return 0;
795 }
796 
798 {
800 
801  return 0;
802 }
803 
805 {
806  return s->flushing;
807 }
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:60
This structure holds a reference to a android/view/Surface object that will be used as output by the ...
Definition: mediacodec.h:33
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1371
#define AMEDIAFORMAT_GET_INT32(name, key, mandatory)
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
char * ff_AMediaCodecList_getCodecNameByType(const char *mime, int profile, int encoder, void *log_ctx)
Memory handling functions.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:459
const char * desc
Definition: nvenc.c:68
void * ff_mediacodec_surface_ref(void *surface, void *log_ctx)
int num
Numerator.
Definition: rational.h:59
int size
Definition: avcodec.h:1478
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1944
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
void ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static AVPacket pkt
void ff_mediacodec_sw_buffer_copy_yuv420_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
The code handling the various YUV color formats is taken from the GStreamer project.
int ff_mediacodec_dec_send(AVCodecContext *avctx, MediaCodecDecContext *s, AVPacket *pkt, bool wait)
int ff_mediacodec_dec_close(AVCodecContext *avctx, MediaCodecDecContext *s)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
int ff_mediacodec_dec_is_flushing(AVCodecContext *avctx, MediaCodecDecContext *s)
int ff_AMediaCodec_flush(FFAMediaCodec *codec)
int ff_AMediaCodec_releaseOutputBuffer(FFAMediaCodec *codec, size_t idx, int render)
uint8_t
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:2737
timestamp utils, mostly useful for debugging/logging purposes
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
int ff_AMediaCodec_infoOutputBuffersChanged(FFAMediaCodec *codec, ssize_t idx)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:361
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
Definition: hwcontext.h:91
void * surface
android/view/Surface handle, to be filled by the user.
#define height
int ff_AMediaCodec_infoOutputFormatChanged(FFAMediaCodec *codec, ssize_t idx)
uint8_t * data
Definition: avcodec.h:1477
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
char * ff_AMediaFormat_toString(FFAMediaFormat *format)
ptrdiff_t size
Definition: opengl_enc.c:100
int ff_mediacodec_surface_unref(void *surface, void *log_ctx)
int color_format
static void ff_mediacodec_dec_unref(MediaCodecDecContext *s)
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3115
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int width
Definition: frame.h:326
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define atomic_load(object)
Definition: stdatomic.h:93
int ff_mediacodec_dec_flush(AVCodecContext *avctx, MediaCodecDecContext *s)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
uint8_t * ff_AMediaCodec_getInputBuffer(FFAMediaCodec *codec, size_t idx, size_t *out_size)
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
#define atomic_fetch_sub(object, operand)
Definition: stdatomic.h:137
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:78
#define fail()
Definition: checkasm.h:120
int ff_mediacodec_dec_receive(AVCodecContext *avctx, MediaCodecDecContext *s, AVFrame *frame, bool wait)
void ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
MIPS optimizations info
Definition: mips.txt:2
int ff_AMediaCodec_queueInputBuffer(FFAMediaCodec *codec, size_t idx, off_t offset, size_t size, uint64_t time, uint32_t flags)
#define FFMIN(a, b)
Definition: common.h:96
ssize_t ff_AMediaCodec_dequeueOutputBuffer(FFAMediaCodec *codec, FFAMediaCodecBufferInfo *info, int64_t timeoutUs)
#define width
int width
picture width / height.
Definition: avcodec.h:1738
void ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar_64x32Tile2m8ka(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, FFAMediaCodecBufferInfo *info, AVFrame *frame)
static int mediacodec_wrap_sw_buffer(AVCodecContext *avctx, MediaCodecDecContext *s, uint8_t *data, size_t size, ssize_t index, FFAMediaCodecBufferInfo *info, AVFrame *frame)
int ff_AMediaCodec_delete(FFAMediaCodec *codec)
AVFormatContext * ctx
Definition: movenc.c:48
int ff_AMediaCodec_getBufferFlagEndOfStream(FFAMediaCodec *codec)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_ARRAY_ELEMS(a)
ssize_t ff_AMediaCodec_dequeueInputBuffer(FFAMediaCodec *codec, int64_t timeoutUs)
FFAMediaCodec * ff_AMediaCodec_createCodecByName(const char *name)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:341
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
#define INPUT_DEQUEUE_TIMEOUT_US
OMX.k3.video.decoder.avc, OMX.NVIDIA.
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int ff_AMediaCodec_cleanOutputBuffers(FFAMediaCodec *codec)
int ff_AMediaCodec_start(FFAMediaCodec *codec)
Libavcodec external API header.
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
main external API structure.
Definition: avcodec.h:1565
uint8_t * data
The data buffer.
Definition: buffer.h:89
int ff_AMediaCodecProfile_getProfileFromAVCodecContext(AVCodecContext *avctx)
The following API around MediaCodec and MediaFormat is based on the NDK one provided by Google since ...
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:356
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1964
int index
Definition: gxfenc.c:89
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static void mediacodec_buffer_release(void *opaque, uint8_t *data)
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
int ff_AMediaCodec_infoTryAgainLater(FFAMediaCodec *codec, ssize_t idx)
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
int ff_AMediaFormat_delete(FFAMediaFormat *format)
mfxU16 profile
Definition: qsvenc.c:44
#define OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US
hardware decoding through MediaCodec
Definition: pixfmt.h:293
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * ff_AMediaCodec_getOutputBuffer(FFAMediaCodec *codec, size_t idx, size_t *out_size)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:369
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:377
int ff_AMediaCodec_configure(FFAMediaCodec *codec, const FFAMediaFormat *format, void *surface, void *crypto, uint32_t flags)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
static void ff_mediacodec_dec_ref(MediaCodecDecContext *s)
int den
Denominator.
Definition: rational.h:60
int ff_mediacodec_dec_init(AVCodecContext *avctx, MediaCodecDecContext *s, const char *mime, FFAMediaFormat *format)
static enum AVPixelFormat mcdec_map_color_format(AVCodecContext *avctx, MediaCodecDecContext *s, int color_format)
pixel format definitions
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
static int mediacodec_dec_parse_format(AVCodecContext *avctx, MediaCodecDecContext *s)
#define OUTPUT_DEQUEUE_TIMEOUT_US
FFAMediaFormat * ff_AMediaCodec_getOutputFormat(FFAMediaCodec *codec)
void * surface
android/view/Surface object reference.
Definition: mediacodec.h:38
int height
Definition: frame.h:326
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
static int mediacodec_wrap_hw_buffer(AVCodecContext *avctx, MediaCodecDecContext *s, ssize_t index, FFAMediaCodecBufferInfo *info, AVFrame *frame)
enum AVPixelFormat pix_fmt
static const struct @117 color_formats[]
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3310
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1454
static int mediacodec_dec_flush_codec(AVCodecContext *avctx, MediaCodecDecContext *s)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
GLuint buffer
Definition: opengl_enc.c:101