Mnist网络算子分析之卷积

219 篇文章 31 订阅
156 篇文章 17 订阅

Mnist.onnx的网络结构如下:

首先看一下,每一次计算图的遍历,执行了几次卷积:

修改代码:

diff --git a/src/default/Conv.c b/src/default/Conv.c
index f812c87..507d303 100644
--- a/src/default/Conv.c
+++ b/src/default/Conv.c
@@ -268,6 +268,7 @@ static inline void dgemm_float64(int n, int m, int o, double * A, double * B, do
 
 static void Conv_float16(struct onnx_node_t * n)
 {
+	printf("%s line %d.\n", __func__, __LINE__);
 	struct operator_pdata_t * pdat = (struct operator_pdata_t *)n->priv;
 	struct onnx_tensor_t * y = n->outputs[0];
 	struct onnx_tensor_t * x = n->inputs[0];
@@ -343,6 +344,7 @@ static void Conv_float16(struct onnx_node_t * n)
 
 		if (conv_mode == CONV_SIMPLE || conv_mode == CONV_CACHED)
 		{
+			printf("%s line %d, NOT IM2COL Mode.\n", __func__, __LINE__);
 			for(int h = 0; h < oH; ++h)
 			{
 				for(int w = 0; w < oW; ++w)
@@ -424,6 +426,7 @@ static void Conv_float16(struct onnx_node_t * n)
 		}
 		else if (conv_mode == CONV_IM2COL)
 		{			
+			printf("%s line %d, IM2COL Mode.\n", __func__, __LINE__);
 			for (int g = 0; g < pdat->group; g++)
 			{
 				for (size_t m = 0; m < MM; m++)
@@ -554,6 +557,7 @@ static void Conv_float16(struct onnx_node_t * n)
 
 static void Conv_float32(struct onnx_node_t * n)
 {
+	printf("%s line %d.\n", __func__, __LINE__);
 	struct operator_pdata_t * pdat = (struct operator_pdata_t *)n->priv;
 	struct onnx_tensor_t * y = n->outputs[0];
 	struct onnx_tensor_t * x = n->inputs[0];
@@ -629,6 +633,7 @@ static void Conv_float32(struct onnx_node_t * n)
 
 		if (conv_mode == CONV_SIMPLE || conv_mode == CONV_CACHED)
 		{
+			printf("%s line %d, NOT IM2COL Mode.\n", __func__, __LINE__);
 			for(int h = 0; h < oH; ++h)
 			{
 				for(int w = 0; w < oW; ++w)
@@ -710,6 +715,7 @@ static void Conv_float32(struct onnx_node_t * n)
 		}
 		else if (conv_mode == CONV_IM2COL)
 		{			
+			printf("%s line %d, IM2COL Mode.\n", __func__, __LINE__);
 			for (int g = 0; g < pdat->group; g++)
 			{
 				for (size_t m = 0; m < MM; m++)
@@ -840,6 +846,7 @@ static void Conv_float32(struct onnx_node_t * n)
 
 static void Conv_float64(struct onnx_node_t * n)
 {
+	printf("%s line %d.\n", __func__, __LINE__);
 	struct operator_pdata_t * pdat = (struct operator_pdata_t *)n->priv;
 	struct onnx_tensor_t * y = n->outputs[0];
 	struct onnx_tensor_t * x = n->inputs[0];
@@ -915,6 +922,7 @@ static void Conv_float64(struct onnx_node_t * n)
 
 		if (conv_mode == CONV_SIMPLE || conv_mode == CONV_CACHED)
 		{
+			printf("%s line %d, NOT IM2COL Mode.\n", __func__, __LINE__);
 			for(int h = 0; h < oH; ++h)
 			{
 				for(int w = 0; w < oW; ++w)
@@ -996,6 +1004,7 @@ static void Conv_float64(struct onnx_node_t * n)
 		}
 		else if (conv_mode == CONV_IM2COL)
 		{			
+			printf("%s line %d, IM2COL Mode.\n", __func__, __LINE__);
 			for (int g = 0; g < pdat->group; g++)
 			{
 				for (size_t m = 0; m < MM; m++)
diff --git a/src/onnx.c b/src/onnx.c
index 67188ca..960361d 100644
--- a/src/onnx.c
+++ b/src/onnx.c
@@ -2115,6 +2115,10 @@ void onnx_run(struct onnx_context_t * ctx)
 	struct onnx_node_t * n;
 	int i;
 
+	static int run_times = 0;
+	int count = run_times ++;
+
+	printf("%s line %d, ==============inference %d times start=============\n", __func__, __LINE__, count);
 	if(ctx)
 	{
 		for(i = 0; i < ctx->g->nlen; i++)
@@ -2124,4 +2128,5 @@ void onnx_run(struct onnx_context_t * ctx)
 				n->operator(n);
 		}
 	}
+	printf("%s line %d, ==============inference %d times finish=============\n", __func__, __LINE__, count);
 }

 运行测试:

可以看到,每轮计算图的遍历,它均执行了2次Conv_float32的卷积算法,并且模式是IM2COL,这和计算图中的卷积层出现的次数是吻合的。

另外一个待澄清点:为何这里的x->dims[0](即batch维度)在卷积计算中被忽略了?

为了澄清,我们添加DEBUG打印如下:

czl@czl-VirtualBox:~/WorkSpace/libonnx/examples/mnist$ git diff
diff --git a/src/default/Conv.c b/src/default/Conv.c
index f812c87..e4019ad 100644
--- a/src/default/Conv.c
+++ b/src/default/Conv.c
@@ -585,6 +585,7 @@ static void Conv_float32(struct onnx_node_t * n)
        }
        if(ndim == 4)
        {
+               printf("%s line %d, ndim = %d, x->dims[0] = %d, group = %d.\n", __func__, __LINE__, ndim, x->dims[0], pdat->group);
                int iC = x->dims[1];
                int iH = x->dims[2];
                int iW = x->dims[3];
@@ -789,6 +790,7 @@ static void Conv_float32(struct onnx_node_t * n)
                int w_dim[ndim];
                int b_dim[ndim];
 
+               printf("%s line %d, ndim = %d, x->dims[0] = %d.\n", __func__, __LINE__, ndim, x->dims[0]);
                memset(o_dim, 0, sizeof(o_dim));
                do {
                        b_dim[0] = o_dim[0];
diff --git a/src/onnx.c b/src/onnx.c
index 67188ca..ccff1f5 100644
--- a/src/onnx.c
+++ b/src/onnx.c
@@ -2115,13 +2115,17 @@ void onnx_run(struct onnx_context_t * ctx)
        struct onnx_node_t * n;
        int i;
 
+       static int count = 0;^M
+       int tmp = count ++;^M
        if(ctx)
        {
+               printf("%s lien %d, ====================%d times=======================\n", __func__, __LINE__, tmp);^M
                for(i = 0; i < ctx->g->nlen; i++)
                {
                        n = &ctx->g->nodes[i];
                        if(n->reshape(n))
                                n->operator(n);
                }
+               printf("%s lien %d, ====================%d times=======================\n", __func__, __LINE__, tmp);^M
        }
 }
czl@czl-VirtualBox:~/WorkSpace/libonnx/examples/mnist$ 

可以看到,ndim总是等于4,并且第一维x->dims[0]都是1,这也和网络结构中的卷积层描述一致:第一个维度(batch维度)恒为1,所以代码中直接忽略它不会影响计算结果。


结束!

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

papaofdoudou

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值