[FFmpeg-cvslog] dnn/tensorflow: add log error message

Ting Fu git at videolan.org
Mon Aug 31 08:18:56 EEST 2020


ffmpeg | branch: master | Ting Fu <ting.fu at intel.com> | Thu Aug 27 12:17:22 2020 +0800| [4a11a6f4ccc7c56ccc82adf0c3ab4054d4c22d1e] | committer: Guo, Yejun

dnn/tensorflow: add log error message

Signed-off-by: Ting Fu <ting.fu at intel.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=4a11a6f4ccc7c56ccc82adf0c3ab4054d4c22d1e
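
For reference, the mechanism behind these new messages is the usual AVClass logging context: av_log() accepts a pointer to any struct whose first member is a const AVClass *, and prefixes each message with that class's item name. Below is a minimal sketch of the pattern as this patch applies it; the helper function at the end is illustrative only and not part of the commit.

    #include "libavutil/log.h"

    /* Logging context: av_log() requires the AVClass pointer to be the
     * first member of the struct it is given. */
    typedef struct TFContext {
        const AVClass *class;
    } TFContext;

    static const AVClass dnn_tensorflow_class = {
        .class_name = "dnn_tensorflow",
        .item_name  = av_default_item_name,
        .option     = NULL,
        .version    = LIBAVUTIL_VERSION_INT,
        .category   = AV_CLASS_CATEGORY_FILTER,
    };

    /* Illustrative helper (not in the patch): each error path logs its
     * specific failure on the context before returning an error. */
    static void log_missing_op(TFContext *ctx, const char *name)
    {
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", name);
    }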
---

 libavfilter/dnn/dnn_backend_tf.c | 59 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index bdc90d5063..5e7f37bb12 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -34,7 +34,12 @@
 
 #include <tensorflow/c/c_api.h>
 
+typedef struct TFContext {
+    const AVClass *class;
+} TFContext;
+
 typedef struct TFModel{
+    TFContext ctx;
     TF_Graph *graph;
     TF_Session *session;
     TF_Status *status;
@@ -44,6 +49,14 @@ typedef struct TFModel{
     uint32_t nb_output;
 } TFModel;
 
+static const AVClass dnn_tensorflow_class = {
+    .class_name = "dnn_tensorflow",
+    .item_name  = av_default_item_name,
+    .option     = NULL,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_FILTER,
+};
+
 static void free_buffer(void *data, size_t length)
 {
     av_freep(&data);
@@ -107,13 +120,16 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
 static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = (TFModel *)model;
+    TFContext *ctx = &tf_model->ctx;
     TF_Status *status;
     int64_t dims[4];
 
     TF_Output tf_output;
     tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
-    if (!tf_output.oper)
+    if (!tf_output.oper) {
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
         return DNN_ERROR;
+    }
 
     tf_output.index = 0;
     input->dt = TF_OperationOutputType(tf_output);
@@ -122,6 +138,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
     if (TF_GetCode(status) != TF_OK){
         TF_DeleteStatus(status);
+        av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
         return DNN_ERROR;
     }
     TF_DeleteStatus(status);
@@ -138,12 +155,14 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
 static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = (TFModel *)model;
+    TFContext *ctx = &tf_model->ctx;
     TF_SessionOptions *sess_opts;
     const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");
 
     // Input operation
     tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
     if (!tf_model->input.oper){
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
         return DNN_ERROR;
     }
     tf_model->input.index = 0;
@@ -152,6 +171,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
     }
     tf_model->input_tensor = allocate_input_tensor(input);
     if (!tf_model->input_tensor){
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
         return DNN_ERROR;
     }
     input->data = (float *)TF_TensorData(tf_model->input_tensor);
@@ -167,6 +187,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
     TF_DeleteSessionOptions(sess_opts);
     if (TF_GetCode(tf_model->status) != TF_OK)
     {
+        av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
         return DNN_ERROR;
     }
 
@@ -178,6 +199,7 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
                       &init_op, 1, NULL, tf_model->status);
         if (TF_GetCode(tf_model->status) != TF_OK)
         {
+            av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
             return DNN_ERROR;
         }
     }
@@ -187,11 +209,13 @@ static DNNReturnType set_input_tf(void *model, DNNData *input, const char *input
 
 static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Buffer *graph_def;
     TF_ImportGraphDefOptions *graph_opts;
 
     graph_def = read_graph(model_filename);
     if (!graph_def){
+        av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
         return DNN_ERROR;
     }
     tf_model->graph = TF_NewGraph();
@@ -203,6 +227,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteGraph(tf_model->graph);
         TF_DeleteStatus(tf_model->status);
+        av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
         return DNN_ERROR;
     }
 
@@ -214,6 +239,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
 static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                     ConvolutionalParams* params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_OperationDescription *op_desc;
     TF_Output input;
@@ -239,10 +265,12 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for kernel of conv layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add kernel to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -256,6 +284,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "Tperm", TF_INT32);
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add transpose to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -270,6 +299,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrString(op_desc, "padding", "VALID", 5);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add conv2d to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -282,10 +312,12 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for conv_biases of conv layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add conv_biases to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -298,6 +330,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add bias_add to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -313,6 +346,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
         op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
         break;
     default:
+        av_log(ctx, AV_LOG_ERROR, "Unsupported convolutional activation function\n");
         return DNN_ERROR;
     }
     input.oper = *cur_op;
@@ -320,6 +354,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add activation function to conv layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -329,6 +364,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
 static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                               DepthToSpaceParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_OperationDescription *op_desc;
     TF_Output input;
     char name_buffer[NAME_BUFFER_SIZE];
@@ -342,6 +378,7 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
     TF_SetAttrInt(op_desc, "block_size", params->block_size);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add depth_to_space to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -351,6 +388,7 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
 static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                               LayerPadParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_Tensor *tensor;
     TF_OperationDescription *op_desc;
@@ -375,10 +413,12 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     pads[7] = params->paddings[3][1];
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -393,6 +433,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -402,6 +443,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
 static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                        DnnLayerMaximumParams *params, const int layer)
 {
+    TFContext *ctx = &tf_model->ctx;
     TF_Operation *op;
     TF_Tensor *tensor;
     TF_OperationDescription *op_desc;
@@ -418,10 +460,12 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     *y = params->val.y;
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -435,6 +479,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
         return DNN_ERROR;
     }
 
@@ -443,6 +488,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
 
 static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
 {
+    TFContext *ctx = &tf_model->ctx;
     int32_t layer;
     TF_OperationDescription *op_desc;
     TF_Operation *op;
@@ -458,6 +504,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
 
     model = ff_dnn_load_model_native(model_filename, NULL);
     if (!model){
+        av_log(ctx, AV_LOG_ERROR, "Failed to load native model\n");
         return DNN_ERROR;
     }
 
@@ -469,6 +516,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     { \
         TF_DeleteGraph(tf_model->graph); \
         TF_DeleteStatus(tf_model->status); \
+        av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
         return DNN_ERROR; \
     }
 
@@ -553,6 +601,7 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, const char *options)
         av_freep(&model);
         return NULL;
     }
+    tf_model->ctx.class = &dnn_tensorflow_class;
 
     if (load_tf_model(tf_model, model_filename) != DNN_SUCCESS){
         if (load_native_model(tf_model, model_filename) != DNN_SUCCESS){
@@ -575,10 +624,13 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
 {
     TF_Output *tf_outputs;
     TFModel *tf_model = (TFModel *)model->model;
+    TFContext *ctx = &tf_model->ctx;
 
     tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
-    if (tf_outputs == NULL)
+    if (tf_outputs == NULL) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n"); \
         return DNN_ERROR;
+    }
 
     if (tf_model->output_tensors) {
         for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
@@ -593,6 +645,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
     tf_model->output_tensors = av_mallocz_array(nb_output, sizeof(*tf_model->output_tensors));
     if (!tf_model->output_tensors) {
         av_freep(&tf_outputs);
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n"); \
         return DNN_ERROR;
     }
 
@@ -600,6 +653,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
         tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
         if (!tf_outputs[i].oper) {
             av_freep(&tf_outputs);
+            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", output_names[i]); \
             return DNN_ERROR;
         }
         tf_outputs[i].index = 0;
@@ -611,6 +665,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, c
                   NULL, 0, NULL, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK) {
         av_freep(&tf_outputs);
+        av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
         return DNN_ERROR;
     }
 

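A brief usage note, not part of the patch: with these messages in place the backend reports the exact failure (missing operator, failed session run, allocation failure, ...) before returning DNN_ERROR, so a caller on the filter side only needs to propagate an error code. Rough sketch below; the output name, the error mapping, and the parameter order of ff_dnn_execute_model_tf (abbreviated in the hunk headers above) are assumptions for illustration.

    /* Sketch only: assumes the DNN interface headers and libavutil/error.h.
     * "y" is a placeholder output name. */
    DNNData output;
    const char *output_name = "y";

    if (ff_dnn_execute_model_tf(model, &output, &output_name, 1) != DNN_SUCCESS) {
        /* The backend has already logged the specific reason via av_log(),
         * so no further diagnostics are needed here. */
        return AVERROR(EIO);
    }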

