[FFmpeg-devel] [PATCH 2/3] libavcodec: v4l2m2m: output AVDRMFrameDescriptor

Lukas Rusak lorusak at gmail.com
Tue May 8 21:24:05 EEST 2018


This is V2 of my previous patch; I have worked together with Jorge to get this working properly.
AV_PIX_FMT_DRM_PRIME output can now be selected by setting avctx->pix_fmt. In that case V4L2
exports the capture buffers as DMA-BUF file descriptors so they can be used for zero-copy. If
AV_PIX_FMT_DRM_PRIME is not selected, the standard pixel formats are used and the buffers are not
exported.
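
For reference, here is a rough sketch of the client side as we intend it to be used with this
patch applied (untested, minimal error handling; the h264_v4l2m2m decoder name is just an example):

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/pixfmt.h>

/* minimal sketch: request zero-copy DRM PRIME frames from a v4l2_m2m decoder */
static AVCodecContext *open_drm_prime_decoder(void)
{
    AVCodec *codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
    AVCodecContext *avctx;

    if (!codec)
        return NULL;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    /* selecting AV_PIX_FMT_DRM_PRIME before avcodec_open2() makes the wrapper
     * export the V4L2 capture buffers instead of only mmap'ing them */
    avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME;

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}

/* every decoded AVFrame then carries the descriptor in data[0] */
static const AVDRMFrameDescriptor *frame_drm_desc(const AVFrame *frame)
{
    return (const AVDRMFrameDescriptor *)frame->data[0];
}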

---
 libavcodec/v4l2_buffers.c | 228 +++++++++++++++++++++++++++++++-------
 libavcodec/v4l2_buffers.h |   5 +-
 libavcodec/v4l2_context.c |  40 ++++++-
 libavcodec/v4l2_m2m.c     |   4 +-
 libavcodec/v4l2_m2m.h     |   3 +
 libavcodec/v4l2_m2m_dec.c |  23 ++++
 6 files changed, 257 insertions(+), 46 deletions(-)
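
For completeness, a sketch of what a DRM client could do with the descriptor afterwards (plain
libdrm, single layer, no format modifiers; this is only an illustration, not part of the patch):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <libavutil/hwcontext_drm.h>

/* sketch: wrap the exported AVDRMFrameDescriptor into a KMS framebuffer
 * (drm_fd is an already opened DRM device) */
static int import_drm_frame(int drm_fd, const AVDRMFrameDescriptor *desc,
                            uint32_t width, uint32_t height, uint32_t *fb_id)
{
    const AVDRMLayerDescriptor *layer = &desc->layers[0];
    uint32_t handles[4] = { 0 }, pitches[4] = { 0 }, offsets[4] = { 0 };
    int i;

    for (i = 0; i < layer->nb_planes; i++) {
        const AVDRMPlaneDescriptor *plane = &layer->planes[i];
        uint32_t handle;

        /* import the DMA-BUF fd of the object backing this plane */
        if (drmPrimeFDToHandle(drm_fd, desc->objects[plane->object_index].fd,
                               &handle) < 0)
            return -1;

        handles[i] = handle;
        pitches[i] = plane->pitch;
        offsets[i] = plane->offset;
    }

    return drmModeAddFB2(drm_fd, width, height, layer->format,
                         handles, pitches, offsets, fb_id, 0);
}

The resulting fb_id can then be displayed with drmModeSetCrtc()/drmModePageFlip(), which is where
the zero-copy path pays off.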

diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
index aef911f3bb..d715bc6a7c 100644
--- a/libavcodec/v4l2_buffers.c
+++ b/libavcodec/v4l2_buffers.c
@@ -21,6 +21,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <drm/drm_fourcc.h>
 #include <linux/videodev2.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
@@ -29,6 +30,7 @@
 #include <poll.h>
 #include "libavcodec/avcodec.h"
 #include "libavcodec/internal.h"
+#include "libavutil/hwcontext.h"
 #include "v4l2_context.h"
 #include "v4l2_buffers.h"
 #include "v4l2_m2m.h"
@@ -203,7 +205,65 @@ static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
     return AVCOL_TRC_UNSPECIFIED;
 }
 
-static void v4l2_free_buffer(void *opaque, uint8_t *unused)
+static uint8_t *v4l2_get_drm_frame(V4L2Buffer *avbuf)
+{
+    AVDRMFrameDescriptor *drm_desc = &avbuf->drm_frame;
+    AVDRMLayerDescriptor *layer;
+
+    /* fill the DRM frame descriptor */
+    drm_desc->nb_objects = 1;
+    drm_desc->nb_layers = 1;
+
+    layer = &drm_desc->layers[0];
+    layer->planes[0].object_index = 0;
+    layer->planes[0].offset = 0;
+    layer->planes[0].pitch = avbuf->plane_info[0].bytesperline;
+
+    switch (avbuf->context->av_pix_fmt) {
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_NV21:
+
+        if (avbuf->num_planes > 1)
+            break;
+
+        layer->format = avbuf->context->av_pix_fmt == AV_PIX_FMT_NV12 ?
+            DRM_FORMAT_NV12 : DRM_FORMAT_NV21;
+        layer->nb_planes = 2;
+
+        layer->planes[1].object_index = 0;
+        layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+            avbuf->context->format.fmt.pix_mp.height;
+        layer->planes[1].pitch = avbuf->plane_info[0].bytesperline;
+        break;
+
+    case AV_PIX_FMT_YUV420P:
+
+        if (avbuf->num_planes > 1)
+            break;
+
+        layer->format = DRM_FORMAT_YUV420;
+        layer->nb_planes = 3;
+
+        layer->planes[1].object_index = 0;
+        layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+            avbuf->context->format.fmt.pix_mp.height;
+        layer->planes[1].pitch = avbuf->plane_info[0].bytesperline >> 1;
+
+        layer->planes[2].object_index = 0;
+        layer->planes[2].offset = layer->planes[1].offset +
+            ((avbuf->plane_info[0].bytesperline *
+              avbuf->context->format.fmt.pix_mp.height) >> 2);
+        layer->planes[2].pitch = avbuf->plane_info[0].bytesperline >> 1;
+        break;
+
+    default:
+        break;
+    }
+
+    return (uint8_t *) drm_desc;
+}
+
+static void v4l2_free_buffer(void *opaque, uint8_t *data)
 {
     V4L2Buffer* avbuf = opaque;
     V4L2m2mContext *s = buf_to_m2mctx(avbuf);
@@ -227,27 +287,49 @@ static void v4l2_free_buffer(void *opaque, uint8_t *unused)
     }
 }
 
-static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
+static int v4l2_buffer_export_drm(V4L2Buffer* avbuf)
 {
-    V4L2m2mContext *s = buf_to_m2mctx(in);
+    struct v4l2_exportbuffer expbuf;
+    int i, ret;
 
-    if (plane >= in->num_planes)
-        return AVERROR(EINVAL);
+    for (i = 0; i < avbuf->num_planes; i++) {
+        memset(&expbuf, 0, sizeof(expbuf));
 
-    /* even though most encoders return 0 in data_offset encoding vp8 does require this value */
-    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
-                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
-    if (!*buf)
-        return AVERROR(ENOMEM);
+        expbuf.index = avbuf->buf.index;
+        expbuf.type = avbuf->buf.type;
+        expbuf.plane = i;
+
+        ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_EXPBUF, &expbuf);
+        if (ret < 0)
+            return AVERROR(errno);
+
+        if (V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type)) {
+            avbuf->buf.m.planes[i].m.fd = expbuf.fd;
+            /* drm frame */
+            avbuf->drm_frame.objects[i].size = avbuf->buf.m.planes[i].length;
+            avbuf->drm_frame.objects[i].fd = expbuf.fd;
+        } else {
+            avbuf->buf.m.fd = expbuf.fd;
+            /* drm frame */
+            avbuf->drm_frame.objects[0].size = avbuf->buf.length;
+            avbuf->drm_frame.objects[0].fd = expbuf.fd;
+        }
+    }
+
+    return 0;
+}
+
+static int v4l2_buf_increase_ref(V4L2Buffer *in)
+{
+    V4L2m2mContext *s = buf_to_m2mctx(in);
 
     if (in->context_ref)
         atomic_fetch_add(&in->context_refcount, 1);
     else {
         in->context_ref = av_buffer_ref(s->self_ref);
-        if (!in->context_ref) {
-            av_buffer_unref(buf);
+        if (!in->context_ref)
             return AVERROR(ENOMEM);
-        }
+
         in->context_refcount = 1;
     }
 
@@ -257,6 +339,46 @@ static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
     return 0;
 }
 
+static int v4l2_buf_to_bufref_drm(V4L2Buffer *in, AVBufferRef **buf)
+{
+    int ret;
+
+    *buf = av_buffer_create((uint8_t *) &in->drm_frame,
+                            sizeof(in->drm_frame),
+                            v4l2_free_buffer,
+                            in, AV_BUFFER_FLAG_READONLY);
+    if (!*buf)
+        return AVERROR(ENOMEM);
+
+    ret = v4l2_buf_increase_ref(in);
+    if (ret)
+        av_buffer_unref(buf);
+
+    return ret;
+}
+
+static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
+{
+    int ret;
+
+    if (plane >= in->num_planes)
+        return AVERROR(EINVAL);
+
+    /* most encoders return 0 in data_offset, but the VP8 encoder does require this value */
+    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
+                            in->plane_info[plane].length,
+                            v4l2_free_buffer,
+                            in, 0);
+    if (!*buf)
+        return AVERROR(ENOMEM);
+
+    ret = v4l2_buf_increase_ref(in);
+    if (ret)
+        av_buffer_unref(buf);
+
+    return ret;
+}
+
 static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref)
 {
     unsigned int bytesused, length;
@@ -267,7 +389,8 @@ static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, i
     bytesused = FFMIN(size, out->plane_info[plane].length);
     length = out->plane_info[plane].length;
 
-    memcpy(out->plane_info[plane].mm_addr, data, FFMIN(size, out->plane_info[plane].length));
+    memcpy(out->plane_info[plane].mm_addr, data,
+           FFMIN(size, out->plane_info[plane].length));
 
     if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
         out->planes[plane].bytesused = bytesused;
@@ -291,7 +414,10 @@ int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer* out)
     int i, ret;
 
     for(i = 0; i < out->num_planes; i++) {
-        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
+        ret = v4l2_bufref_to_buf(out, i,
+                                frame->buf[i]->data,
+                                frame->buf[i]->size,
+                                frame->buf[i]);
         if (ret)
             return ret;
     }
@@ -308,34 +434,46 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
 
     av_frame_unref(frame);
 
-    /* 1. get references to the actual data */
-    for (i = 0; i < avbuf->num_planes; i++) {
-        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
+    if (buf_to_m2mctx(avbuf)->output_drm) {
+        /* 1. get references to the actual data */
+        ret = v4l2_buf_to_bufref_drm(avbuf, &frame->buf[0]);
         if (ret)
             return ret;
 
-        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
-        frame->data[i] = frame->buf[i]->data;
-    }
+        frame->data[0] = (uint8_t *) v4l2_get_drm_frame(avbuf);
+        frame->format = AV_PIX_FMT_DRM_PRIME;
+    } else {
+        /* 1. get references to the actual data */
+        for (i = 0; i < avbuf->num_planes; i++) {
+            ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
+            if (ret)
+                return ret;
+
+            frame->linesize[i] = avbuf->plane_info[i].bytesperline;
+            frame->data[i] = frame->buf[i]->data;
+        }
 
-    /* 1.1 fixup special cases */
-    switch (avbuf->context->av_pix_fmt) {
-    case AV_PIX_FMT_NV12:
-        if (avbuf->num_planes > 1)
+        /* 1.1 fixup special cases */
+        switch (avbuf->context->av_pix_fmt) {
+        case AV_PIX_FMT_NV12:
+            if (avbuf->num_planes > 1)
+                break;
+            frame->linesize[1] = avbuf->plane_info[0].bytesperline;
+            frame->data[1] = frame->buf[0]->data +
+                avbuf->plane_info[0].bytesperline *
+                avbuf->context->format.fmt.pix_mp.height;
             break;
-        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
-        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
-        break;
-    default:
-        break;
+        default:
+            break;
+        }
+        frame->format = avbuf->context->av_pix_fmt;
     }
 
     /* 2. get frame information */
     frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
-    frame->format = avbuf->context->av_pix_fmt;
     frame->color_primaries = v4l2_get_color_primaries(avbuf);
-    frame->colorspace = v4l2_get_color_space(avbuf);
     frame->color_range = v4l2_get_color_range(avbuf);
+    frame->colorspace = v4l2_get_color_space(avbuf);
     frame->color_trc = v4l2_get_color_trc(avbuf);
     frame->pts = v4l2_get_pts(avbuf);
 
@@ -361,7 +499,8 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
     if (ret)
         return ret;
 
-    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
+    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ?
+        avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
     pkt->data = pkt->buf->data;
 
     if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
@@ -417,6 +556,7 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
             /* in MP, the V4L2 API states that buf.length means num_planes */
             if (avbuf->num_planes >= avbuf->buf.length)
                 break;
+
             if (avbuf->buf.m.planes[avbuf->num_planes].length)
                 avbuf->num_planes++;
         }
@@ -433,12 +573,14 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
             avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                            PROT_READ | PROT_WRITE, MAP_SHARED,
-                                           buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
+                                           buf_to_m2mctx(avbuf)->fd,
+                                           avbuf->buf.m.planes[i].m.mem_offset);
         } else {
             avbuf->plane_info[i].length = avbuf->buf.length;
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                           PROT_READ | PROT_WRITE, MAP_SHARED,
-                                          buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
+                                          buf_to_m2mctx(avbuf)->fd,
+                                          avbuf->buf.m.offset);
         }
 
         if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
@@ -447,18 +589,23 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
 
     avbuf->status = V4L2BUF_AVAILABLE;
 
-    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
-        return 0;
-
     if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
-        avbuf->buf.m.planes = avbuf->planes;
         avbuf->buf.length   = avbuf->num_planes;
-
+        avbuf->buf.m.planes = avbuf->planes;
     } else {
         avbuf->buf.bytesused = avbuf->planes[0].bytesused;
         avbuf->buf.length    = avbuf->planes[0].length;
     }
 
+    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+        return 0;
+
+    if (buf_to_m2mctx(avbuf)->output_drm) {
+        ret = v4l2_buffer_export_drm(avbuf);
+        if (ret)
+            return ret;
+    }
+
     return ff_v4l2_buffer_enqueue(avbuf);
 }
 
@@ -476,3 +623,4 @@ int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf)
 
     return 0;
 }
+
diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h
index dc5cc9e267..c609a6c676 100644
--- a/libavcodec/v4l2_buffers.h
+++ b/libavcodec/v4l2_buffers.h
@@ -27,6 +27,7 @@
 #include <stdatomic.h>
 #include <linux/videodev2.h>
 
+#include "libavutil/hwcontext_drm.h"
 #include "avcodec.h"
 
 enum V4L2Buffer_status {
@@ -42,6 +43,9 @@ typedef struct V4L2Buffer {
     /* each buffer needs to have a reference to its context */
     struct V4L2Context *context;
 
+    /* DRM descriptor */
+    AVDRMFrameDescriptor drm_frame;
+
     /* This object is refcounted per-plane, so we need to keep track
      * of how many context-refs we are holding. */
     AVBufferRef *context_ref;
@@ -127,5 +131,4 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index);
  */
 int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf);
 
-
 #endif // AVCODEC_V4L2_BUFFERS_H
diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c
index efcb0426e4..9457fadb1e 100644
--- a/libavcodec/v4l2_context.c
+++ b/libavcodec/v4l2_context.c
@@ -393,22 +393,54 @@ static int v4l2_release_buffers(V4L2Context* ctx)
     struct v4l2_requestbuffers req = {
         .memory = V4L2_MEMORY_MMAP,
         .type = ctx->type,
-        .count = 0, /* 0 -> unmaps buffers from the driver */
+        .count = 0, /* 0 -> unmap all buffers from the driver */
     };
-    int i, j;
+    int ret, i, j;
 
     for (i = 0; i < ctx->num_buffers; i++) {
         V4L2Buffer *buffer = &ctx->buffers[i];
 
         for (j = 0; j < buffer->num_planes; j++) {
             struct V4L2Plane_info *p = &buffer->plane_info[j];
+
+            if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
+                /* output buffers are not EXPORTED */
+                goto unmap;
+            }
+
+            if (ctx_to_m2mctx(ctx)->output_drm) {
+                /* close the fds exported via the DRM frame descriptor */
+                if (buffer->drm_frame.objects[j].fd >= 0) {
+                    if (close(buffer->drm_frame.objects[j].fd) < 0) {
+                        av_log(logger(ctx), AV_LOG_ERROR, "%s failed to close drm fd "
+                            "[buffer=%2d, plane=%d, fd=%2d] - %s\n",
+                            ctx->name, i, j, buffer->drm_frame.objects[j].fd,
+                            av_err2str(AVERROR(errno)));
+                    }
+                }
+            }
+unmap:
             if (p->mm_addr && p->length)
                 if (munmap(p->mm_addr, p->length) < 0)
-                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, av_err2str(AVERROR(errno)));
+                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s)\n",
+                        ctx->name, av_err2str(AVERROR(errno)));
         }
     }
 
-    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
+    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
+    if (ret < 0) {
+        av_log(logger(ctx), AV_LOG_ERROR, "failed to release all %s buffers (%s)\n",
+               ctx->name, av_err2str(AVERROR(errno)));
+
+        if (ctx_to_m2mctx(ctx)->output_drm)
+            av_log(logger(ctx), AV_LOG_ERROR,
+                   "Make sure the DRM client releases all FB/GEM objects before closing the codec, i.e.\n"
+                   "for all buffers:\n"
+                   "  1. drmModeRmFB(..)\n"
+                   "  2. drmIoctl(.., DRM_IOCTL_GEM_CLOSE, ...)\n");
+    }
+
+    return ret;
 }
 
 static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c
index 427e165f58..7896326e80 100644
--- a/libavcodec/v4l2_m2m.c
+++ b/libavcodec/v4l2_m2m.c
@@ -159,7 +159,9 @@ static int v4l2_configure_contexts(V4L2m2mContext* s)
         goto error;
     }
 
-    /* decoder's buffers need to be updated at a later stage */
+    /* the decoder's capture buffers are updated during v4l2_try_start,
+     * once a valid capture format has been negotiated.
+     */
     if (!av_codec_is_decoder(s->avctx->codec)) {
         ret = ff_v4l2_context_init(&s->capture);
         if (ret) {
diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h
index 452bf0d9bc..9ac5a2448d 100644
--- a/libavcodec/v4l2_m2m.h
+++ b/libavcodec/v4l2_m2m.h
@@ -59,6 +59,9 @@ typedef struct V4L2m2mContext {
 
     /* Reference to self; only valid while codec is active. */
     AVBufferRef *self_ref;
+
+    /* generate DRM frames */
+    int output_drm;
 } V4L2m2mContext;
 
 typedef struct V4L2m2mPriv
diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c
index ed5193ecc1..2b33badb08 100644
--- a/libavcodec/v4l2_m2m_dec.c
+++ b/libavcodec/v4l2_m2m_dec.c
@@ -23,12 +23,18 @@
 
 #include <linux/videodev2.h>
 #include <sys/ioctl.h>
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_drm.h"
 #include "libavutil/pixfmt.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/opt.h"
 #include "libavcodec/avcodec.h"
 #include "libavcodec/decode.h"
 
+#include "libavcodec/hwaccel.h"
+#include "libavcodec/internal.h"
+
 #include "v4l2_context.h"
 #include "v4l2_m2m.h"
 #include "v4l2_fmt.h"
@@ -183,6 +189,15 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx)
     capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
     capture->av_pix_fmt = avctx->pix_fmt;
 
+    /* the client requests the codec to generate DRM frames:
+     *   - data[0] will then point to the returned AVDRMFrameDescriptor
+     *       (see the ff_v4l2_buffer_buf_to_avframe conversion function).
+     *   - the DRM frame format is described by the DRM frame descriptor layer
+     *       (see the v4l2_get_drm_frame function).
+     */
+    if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME)
+        s->output_drm = 1;
+
     ret = ff_v4l2_m2m_codec_init(avctx);
     if (ret) {
         av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
@@ -202,6 +217,11 @@ static const AVOption options[] = {
     { NULL},
 };
 
+static const AVCodecHWConfigInternal *v4l2_m2m_hw_configs[] = {
+    HW_CONFIG_INTERNAL(DRM_PRIME),
+    NULL
+};
+
 #define M2MDEC_CLASS(NAME) \
     static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
         .class_name = #NAME "_v4l2_m2m_decoder", \
@@ -222,7 +242,10 @@ static const AVOption options[] = {
         .init           = v4l2_decode_init, \
         .receive_frame  = v4l2_receive_frame, \
         .close          = ff_v4l2_m2m_codec_end, \
+        .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \
+                                                         AV_PIX_FMT_NONE }, \
         .bsfs           = bsf_name, \
+        .hw_configs     = v4l2_m2m_hw_configs, \
         .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY, \
         .wrapper_name   = "v4l2m2m", \
     };
-- 
2.17.0


