[FFmpeg-devel] [PATCH 1/2] dnn_backend_native_layer_mathunary: add ceil support
Mingyu Yin
mingyu.yin at intel.com
Tue Jul 28 14:49:24 EEST 2020
It can be tested with a model generated by the Python script below:
# Generate a one-op TensorFlow 1.x model (tf.math.ceil) for testing the
# FFmpeg native DNN backend.  Writes a frozen graph to
# <cwd>/ceil_savemodel/model.pb and a SavedModel to <cwd>/ceil_save, then
# runs the graph once on 'detection.jpg' and saves the result as out.jpg.
# NOTE(review): indentation was lost in the mailing-list archive; it is
# reconstructed here from the statement structure.
import tensorflow as tf
import os
import numpy as np
import imageio
from tensorflow.python.framework import graph_util

name = 'ceil'
pb_file_path = os.getcwd()
# Create the output directory for the frozen graph if it does not exist yet.
if not os.path.exists(pb_file_path+'/{}_savemodel/'.format(name)):
    os.mkdir(pb_file_path+'/{}_savemodel/'.format(name))

with tf.Session(graph=tf.Graph()) as sess:
    # Load the test image and add a leading batch dimension -> (1, H, W, 3).
    in_img = imageio.imread('detection.jpg')
    in_img = in_img.astype(np.float32)
    in_data = in_img[np.newaxis, :]
    # Input/output tensor names 'dnn_in'/'dnn_out' are what the FFmpeg
    # dnn_processing filter expects on the command line below.
    input_x = tf.placeholder(tf.float32, shape=[1, None, None, 3], name='dnn_in')
    if name == 'ceil':
        y = tf.math.ceil(input_x, name='dnn_out')
    sess.run(tf.global_variables_initializer())
    # Freeze variables into constants so the graph is self-contained.
    constant_graph = graph_util.convert_variables_to_constants(sess,
        sess.graph_def, ['dnn_out'])
    with tf.gfile.FastGFile(pb_file_path+'/{}_savemodel/model.pb'.format(name), mode='wb') as f:
        f.write(constant_graph.SerializeToString())
    builder = tf.saved_model.builder.SavedModelBuilder(pb_file_path+'/{}_save'.format(name))
    builder.add_meta_graph_and_variables(sess,
        ['{}_meta_graph_server'.format(name)])
    print("model.pb generated, please use \
path_to_ffmpeg/tools/python/convert.py to generate model.model\n")
    # Run the op once to produce a reference output image.
    output = sess.run(y, feed_dict={input_x: in_data})
    imageio.imsave("out.jpg", np.squeeze(output))
    print("To verify, please use\n \n \
./ffmpeg -i path_to_image -vf format=rgb24,dnn_processing=path_to_tensorflow_model(.pb):input=dnn_in:output=dnn_out:dnn_backend=tensorflow -f framemd5 path_to_output_result_dir/tensorflow_out.md5\n \n \
to generate output result of tensorflow model\n")
    print("To verify, please use\n \n \
./ffmpeg -i path_to_image -vf format=rgb24,dnn_processing=path_to_native_model(.model):input=dnn_in:output=dnn_out:dnn_backend=native -f framemd5 path_to_output_result_dir/native_out.md5\n \n \
to generate output result of native model\n")
    builder.save()
Signed-off-by: Mingyu Yin <mingyu.yin at intel.com>
---
libavfilter/dnn/dnn_backend_native_layer_mathunary.c | 4 ++++
libavfilter/dnn/dnn_backend_native_layer_mathunary.h | 1 +
tests/dnn/dnn-layer-mathunary-test.c | 4 ++++
tools/python/convert_from_tensorflow.py | 4 +++-
tools/python/convert_header.py | 2 +-
5 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index c5f0f7adec..a62f6ba6f0 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -130,6 +130,10 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
for (int i = 0; i < dims_count; ++i)
dst[i] = atanh(src[i]);
return 0;
+ case DMUO_CEIL:
+ for (int i = 0; i < dims_count; ++i)
+ dst[i] = ceil(src[i]);
+ return 0;
default:
return -1;
}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
index 8076356ba4..82b2d7f4ab 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
@@ -43,6 +43,7 @@ typedef enum {
DMUO_ASINH = 10,
DMUO_ACOSH = 11,
DMUO_ATANH = 12,
+ DMUO_CEIL = 13,
DMUO_COUNT
} DNNMathUnaryOperation;
diff --git a/tests/dnn/dnn-layer-mathunary-test.c b/tests/dnn/dnn-layer-mathunary-test.c
index 5afc5c157e..a653b945c4 100644
--- a/tests/dnn/dnn-layer-mathunary-test.c
+++ b/tests/dnn/dnn-layer-mathunary-test.c
@@ -56,6 +56,8 @@ static float get_expected(float f, DNNMathUnaryOperation op)
return acosh(f);
case DMUO_ATANH:
return atanh(f);
+ case DMUO_CEIL:
+ return ceil(f);
default:
av_assert0(!"not supported yet");
return 0.f;
@@ -128,5 +130,7 @@ int main(int agrc, char **argv)
return 1;
if (test(DMUO_ATANH))
return 1;
+ if (test(DMUO_CEIL))
+ return 1;
return 0;
}
diff --git a/tools/python/convert_from_tensorflow.py b/tools/python/convert_from_tensorflow.py
index 85db7bf710..64b7551314 100644
--- a/tools/python/convert_from_tensorflow.py
+++ b/tools/python/convert_from_tensorflow.py
@@ -72,7 +72,9 @@ class TFConverter:
self.conv2d_scopename_inputname_dict = {}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4, 'MathBinary':5, 'MathUnary':6}
self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4}
- self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4, 'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10, 'Acosh':11, 'Atanh':12}
+ self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
+ 'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
+ 'Acosh':11, 'Atanh':12, 'Ceil':13}
self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
self.name_operand_dict = {}
diff --git a/tools/python/convert_header.py b/tools/python/convert_header.py
index 9851d84144..62f1d342f3 100644
--- a/tools/python/convert_header.py
+++ b/tools/python/convert_header.py
@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
major = 1
# increase minor when we don't have to re-convert the model file
-minor = 18
+minor = 19
--
2.17.1
More information about the ffmpeg-devel
mailing list