Revert "add onnx pack"

This commit is contained in:
Mculover666
2021-09-07 12:58:23 +08:00
committed by GitHub
parent da4cfd716e
commit 9a13cf1860
25 changed files with 0 additions and 8966 deletions

View File

@@ -1,95 +0,0 @@
![](https://raw.githubusercontent.com/onnx/onnx/master/docs/ONNX_logo_main.png)
# ONNX
**A port of the general-purpose neural network inference framework onnx to TencentOS-tiny**
[ONNX](https://onnx.ai/) (Open Neural Network Exchange) is a common format for machine learning models that lets models from different machine learning frameworks interoperate.
ONNX is a standard for representing deep learning models, so a model can be moved between frameworks. Models trained with well-known frameworks such as TensorFlow, Keras, PyTorch, Caffe2 and MXNet can be converted to the onnx format and then run on an RTOS.
## Supported operators
- Conv2D
- Relu
- Maxpool
- Softmax
- Matmul
- Add
- Flatten
- Transpose
## mnist examples
There are currently two handwritten-digit recognition examples:
mnist_int and mnist_float
They cover integer and floating-point inference respectively, chosen according to the target platform: mnist_int has been verified on i.MX6ULL and mnist_float on STM32L4. The smallest demo needs only 16 KB of RAM, so it also runs on an STM32F103C8T6. mnist_int is effectively an int32 quantization: the normalized double-precision parameters are multiplied by 1000 and the softmax operator is adjusted accordingly, which gives faster integer inference (a quantization sketch follows the table below).
| Example file  | Description                                                             |
| ------------- | ----------------------------------------------------------------------- |
| mnist_int.c   | Integer inference; model parameters are stored in mnist_int.h            |
| mnist_float.c | Floating-point inference; model parameters are stored in mnist_float.h   |
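A minimal sketch of that int32 quantization step (the `quantize_weight` helper and the `WEIGHT_SCALE` constant are illustrative names, not part of the repository):
```c
#include <math.h>

/* Hypothetical helper: scale a normalized double-precision parameter by
 * 1000 and round it to the nearest integer, which is how the values in
 * mnist_int.h appear to have been produced. */
#define WEIGHT_SCALE 1000

static int quantize_weight(double w)
{
    return (int)round(w * WEIGHT_SCALE);
}

/* e.g. the first entry of W3[] in mnist_int.h is -323,
 * consistent with a floating-point weight of about -0.3233 */
```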
#### Model structure
```
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_5 (Conv2D) (None, 28, 28, 2) 20
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 14, 14, 2) 0
_________________________________________________________________
dropout_5 (Dropout) (None, 14, 14, 2) 0
_________________________________________________________________
conv2d_6 (Conv2D) (None, 14, 14, 2) 38
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 7, 7, 2) 0
_________________________________________________________________
dropout_6 (Dropout) (None, 7, 7, 2) 0
_________________________________________________________________
flatten_3 (Flatten) (None, 98) 0
_________________________________________________________________
dense_5 (Dense) (None, 4) 396
_________________________________________________________________
dense_6 (Dense) (None, 10) 50
=================================================================
Total params: 504
Trainable params: 504
Non-trainable params: 0
_________________________________________________________________
```
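For reference, the parameter counts above follow directly from the layer shapes: conv2d_5 has 3×3×1×2 weights plus 2 biases (20), conv2d_6 has 3×3×2×2 weights plus 2 biases (38), dense_5 maps the flattened 7×7×2 = 98 values to 4 units (98×4 + 4 = 396), and dense_6 maps those 4 units to 10 classes (4×10 + 10 = 50), giving 504 parameters in total.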
Inference test:
![mnist_test](pic/mnist_test.png)
## Notes
Since onnx models are serialized in the Google Protobuf v3 format, the protobuf folder also contains the model-parsing code, which can work together with a file system to load models at run time; a minimal usage sketch follows the lists below.
- protobuf-c
- onnx-pb-c
The platform folder contains platform-specific adaptations, such as the malloc/free implementations:
- tencentos_libc_malloc
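A minimal usage sketch for loading and running a model, assuming a file system is available and the commented-out onnx_load_model() in onnx-parser.c is enabled (or replaced by an equivalent loader); the file name "mnist.onnx", the run_mnist_model() wrapper and the 28x28x1 input shape are illustrative assumptions:
```c
#include "onnx.h"

/* Hypothetical entry point: load an onnx model from the file system,
 * run it on one 28x28 grayscale image and print the 10 class scores. */
int run_mnist_model(const int *image_28x28)
{
    Onnx__ModelProto *model = onnx_load_model("mnist.onnx");   /* assumed file name */
    if (model == NULL)
        return -1;

    onnx_model_info(model);

    /* Input shape in NWHC order (see W_INDEX/H_INDEX/C_INDEX in onnx.h). */
    int64_t shape[3] = {28, 28, 1};

    /* onnx_model_run() frees and replaces its input buffer as it walks the
     * graph, so hand it a heap copy of the image. */
    int *input = (int *)malloc(sizeof(int) * 28 * 28);
    if (input == NULL)
        return -1;
    memcpy(input, image_28x28, sizeof(int) * 28 * 28);

    int *output = onnx_model_run(model, input, shape);

    for (int i = 0; i < 10; i++)
        printf("%d ", output[i]);
    printf("\r\n");

    free(output);
    onnx__model_proto__free_unpacked(model, NULL);   /* standard protobuf-c cleanup */
    return 0;
}
```
Because onnx_model_run() walks the graph node by node and frees the previous layer's buffer at every step, the caller only has to free the final output buffer.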
## Todo List
- Parse more complex models
- Adapt and accelerate operators for different hardware
## References
https://github.com/wuhanstudio/onnx-backend
## Contact
- Maintainer: derek
- Email: dkeji627@gmail.com

View File

@@ -1,191 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include "mnist_int.h"
#include "onnx.h"
static const char codeLib[] = "@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. ";
int data[5] = {1, 2, 3, 4, 5};   /* sample data, unused below */
static const int img[2][784] = {IMG0, IMG1};
static const int img1[784] = {1,2,3,4,5};
int hello()
{
printf("hello pnnx\r\n");
return 0;
}
void print_img(void * buf)
{
int index = 0;
//char ch = '@';
int x = 0;
int y = 0;
printf("test2\r\n");
for(y = 0; y < 28; y++)
{
for (x = 0; x < 28; x++)
{
index = 0;
if(((int*)buf)[y*28+x] > 600)
{
index =69;
}
if(index < 0)
{
index = 0;
}
printf("%c",codeLib[index]);
printf("%c",codeLib[index]);
}
printf("\r\n");
}
}
int mnist()
{
printf("test1\r\n");
int img_index = 1;
print_img(img[img_index]);
printf("img ok\r\n");
// 1. Conv2D
int64_t shapeW3[] = {2, 1, 3, 3};
int64_t dimW3 = 4;
int64_t permW3_t[] = { 0, 2, 3, 1};
int* W3_t = transpose(W3, shapeW3, dimW3, permW3_t);
printf("transpose ok\r\n");
int* conv1 = (int*) malloc(sizeof(int)*28*28*2);
memset(conv1, 0, sizeof(int)*28*28*2);
/* use the transposed (NWHC) weights, matching the second convolution below */
conv2D(img[img_index], 28, 28, 1, W3_t, 2, 3, 3, 1, 1, 1, 1, B3, conv1, 28, 28);
free(W3_t);
printf("Conv2D ok \r\n");
// 2. Relu
int* relu1 = (int*) malloc(sizeof(int)*28*28*2);
relu(conv1, 28*28*2, relu1);
free(conv1);
printf("Relu ok\r\n");
// 3. Maxpool
int* maxpool1 = (int*) malloc(sizeof(int)*14*14*2);
memset(maxpool1, 0, sizeof(int)*14*14*2);
maxpool(relu1, 28, 28, 2, 2, 2, 0, 0, 2, 2, 14, 14, maxpool1);
free(relu1);
printf("Maxpool ok\r\n");
// 4. Conv2D
int64_t shapeW2[] = {2, 2, 3, 3};
int64_t dimW2 = 4;
int64_t perm_t[] = { 0, 2, 3, 1};
int* W2_t = transpose(W2, shapeW2, dimW2, perm_t);
int* conv2 = (int*) malloc(sizeof(int)*14*14*2);
memset(conv2, 0, sizeof(int)*14*14*2);
conv2D(maxpool1, 14, 14, 2, W2_t, 2, 3, 3, 1, 1, 1, 1, B2, conv2, 14, 14);
free(W2_t);
free(maxpool1);
printf("Conv2D ok\r\n");
// 5. Relu
int* relu2 = (int*) malloc(sizeof(int)*14*14*2);
relu(conv2, 14*14*2, relu2);
free(conv2);
printf("Relu ok\r\n");
// 6. Maxpool
int* maxpool2 = (int*) malloc(sizeof(int)*7*7*2);
memset(maxpool2, 0, sizeof(int)*7*7*2);
maxpool(relu2, 14, 14, 2, 2, 2, 0, 0, 2, 2, 7, 7, maxpool2);
free(relu2);
printf("Maxpool ok\r\n");
// Flatten NOT REQUIRED
// 7. Dense
int64_t shapeW1[] = {98, 4};
int64_t dimW1 = 2;
int64_t permW1_t[] = { 1, 0};
int* W1_t = transpose(W1, shapeW1, dimW1, permW1_t);
int* dense1 = (int*) malloc(sizeof(int)*4);
memset(dense1, 0, sizeof(int)*4);
dense(maxpool2, W1_t, 98, 4, B1, dense1);
free(W1_t);
free(maxpool2);
printf("Dense ok\r\n");
// 8. Dense
int64_t shapeW[] = {4, 10};
int64_t dimW = 2;
int64_t permW_t[] = { 1, 0};
int* W_t = transpose(W, shapeW, dimW, permW_t);
int* dense2 = (int*) malloc(sizeof(int)*10);
memset(dense2, 0, sizeof(int)*10);
dense(dense1, W_t, 4, 10, B, dense2);
free(W_t);
free(dense1);
printf("Dense ok\r\n");
// 9. Softmax
int* output = (int*) malloc(sizeof(int)*10);
memset(output, 0, sizeof(int)*10);
softmax(dense2, 10, output);
printf("Softmax ok\r\n");
int max = 0;
int min = output[0];
int max_index = 0;
int min_index = 0;
printf("\n\rPredictions: \n\r");
for(int i = 0; i < 10; i++)
{
printf("%d ", output[i]);
if(output[i] > max)
{
max = output[i];
max_index = i;
}
if(output[i] < min)
{
min = output[i];
min_index = i;
}
}
printf("\n\r");
printf("\n\rThe number is %d\n\r", min_index);
free(dense2);
free(output);
printf("Result ok\r\n");
return 0;
}

View File

@@ -1,73 +0,0 @@
#ifndef __MNIST_INT_H__
#define __MNIST_INT_H__
#include <stdio.h>
#include <stdint.h>
#define IMG0 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,380,376,301,462,239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,352,541,921,921,921,921,921,921,984,984,972,996,960,921,745,82,0,0,0,0,0,0,0,0,0,0,0,549,984,996,996,996,996,996,996,996,996,996,996,996,996,996,996,741,90,0,0,0,0,0,0,0,0,0,0,886,996,815,780,780,780,780,545,239,239,239,239,239,501,870,996,996,741,82,0,0,0,0,0,0,0,0,0,149,321,50,0,0,0,0,0,0,0,0,0,0,0,133,835,996,996,450,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,329,996,996,917,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,329,996,996,917,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,415,615,996,996,952,200,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,98,458,894,894,894,992,996,996,996,996,941,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,266,466,862,996,996,996,996,996,996,996,996,996,556,0,0,0,0,0,0,0,0,0,0,0,0,0,145,733,992,996,996,996,874,807,807,294,266,843,996,996,458,0,0,0,0,0,0,0,0,0,0,0,0,443,858,996,949,890,450,349,121,0,0,0,0,784,996,945,160,0,0,0,0,0,0,0,0,0,0,0,0,662,996,690,243,0,0,0,0,0,0,0,188,905,996,917,0,0,0,0,0,0,0,0,0,0,0,0,0,70,486,0,0,0,0,0,0,0,0,0,329,996,996,650,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,545,996,933,223,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,823,980,996,658,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,949,996,937,223,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,349,984,945,337,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,19,807,964,615,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,15,458,270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
#define IMG0_LABEL 7
#define IMG1 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,121,517,996,992,996,835,321,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,82,556,913,988,992,988,992,988,874,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,482,996,992,996,992,878,796,796,874,1000,835,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,796,992,988,992,831,78,0,0,239,992,988,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,160,952,878,796,717,160,596,117,0,0,1000,992,400,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,156,78,0,0,400,992,196,0,321,992,988,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,321,839,121,443,913,996,913,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,243,400,321,160,992,909,992,988,913,196,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,596,992,996,992,996,992,996,913,482,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,596,988,992,988,992,988,752,196,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,243,717,796,952,996,992,243,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,156,674,988,796,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,82,0,0,0,0,0,0,0,0,0,717,996,439,0,0,0,0,0,0,0,0,0,0,0,0,0,0,243,796,639,0,0,0,0,0,0,0,0,239,992,592,0,0,0,0,0,0,0,0,0,0,0,0,0,82,839,752,0,0,0,0,0,0,0,0,43,835,996,592,0,0,0,0,0,0,0,0,0,0,0,0,0,400,992,592,0,0,0,0,0,0,0,160,835,988,992,435,0,0,0,0,0,0,0,0,0,0,0,0,0,160,1000,835,360,200,0,0,121,360,678,992,996,992,556,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,674,988,992,988,796,796,913,988,992,988,992,509,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,82,796,1000,992,996,992,996,992,956,796,321,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,78,592,592,992,670,592,592,156,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
#define IMG1_LABEL 3
#define TOTAL_IMAGE 2
static const signed char label[] = {IMG0_LABEL, IMG1_LABEL};
static const int W3[] = {-323,-426,-651,790,-221,37,398,221,797,254,307,625,-589,203,-64,-1566,-376,-644};
static const int B3[] = {-829,-140};
static const int W2[] = {7,231,36,-146,-155,4,273,-27,234,-635,-556,-770,156,710,239,1820,-18,1574,1723,-596,1399,335,568,379,35,-182,-32,6,-2,-5,293,137,355,2,2,-22};
static const int B2[] = {-116,-3};
static const int W1[] = {157,-226,21,25,8,-775,-415,-125,-396,335,-631,-28,-506,-357,-3780,-826,102,571,-625,66,559,253,-3075,-695,253,317,-866,127,831,266,-2586,-572,297,162,-991,77,891,168,-2524,-563,416,-108,-1022,206,398,-160,-1918,-483,57,-1257,-231,1051,-798,-1626,-260,-76,-464,755,131,247,-1527,163,-75,-58,-338,1305,144,440,-310,154,5,-31,-159,661,83,265,-38,180,-7,54,-14,306,6,223,30,126,-28,111,35,46,-26,264,69,107,-30,95,248,-364,-102,496,40,20,-54,54,-71,-1538,-235,1589,-23,-249,18,80,51,614,157,128,-869,1376,430,134,-149,454,130,231,3,427,233,92,-60,464,103,250,-53,214,116,224,126,234,127,332,14,106,108,305,314,-71,134,454,54,74,97,274,486,-436,-135,572,135,-7,118,244,-375,-468,-564,865,340,-172,40,363,89,-498,476,21,285,617,705,-306,-570,-206,41,230,-179,-23,141,23,-641,-69,-85,164,-534,-101,-131,149,-635,-98,-232,154,-485,-190,-204,106,-529,-173,-362,122,-386,-247,-252,102,-145,-101,43,-171,-31,-301,-94,69,-549,668,145,-737,770,-412,101,52,254,227,-30,-83,-663,512,-121,-334,-75,-98,-16,-31,-435,94,-49,-77,-128,-89,-70,-10,-290,-13,-39,-23,-155,-52,-147,-75,-268,-35,-95,-15,-39,24,-196,-199,-203,-42,-187,-45,-10,148,-117,-418,-206,-24,-157,-55,-90,402,-357,-786,-79,162,-144,-274,268,688,-64,113,599,1294,-1250,608,123,158,-175,34,391,231,-756,200,79,14,-121,8,268,57,-526,124,80,-38,-88,0,286,-10,-393,111,65,-33,-74,-27,300,2,-479,-45,-10,39,-92,-192,154,212,-389,-373,-206,292,-129,-360,-554,457,-352,-947,-1248,887,336,3,-227,1456,549,383,-411,375,176,38,163,705,55,644,-207,146,103,197,174,365,-97,522,-184,-1,88,241,155,172,-105,382,-306,-162,115,307,158,-17,-50,262,-1299,-227,108,744,-398,16,100,-163,-649,-567,17,989,-1395,441,166,-191};
static const int B1[] = {1201,-1177,2169,-1961};
static const int W[] = {558,787,-40,-122,-412,-36,169,-147,-16,-280,18,62,495,339,-475,-140,-882,183,20,-137,-52,679,-280,-312,444,-261,-322,1032,-144,522,57,-965,-305,168,-532,426,-543,14,267,159};
static const int B[] = {41,1461,71,-1277,809,-1693,-297,-117,329,659};
/*
#define IMG0 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3803,3764,3019,4627,2392,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3529,5411,9215,9215,9215,9215,9215,9215,9843,9843,9725,9960,9607,9215,7450,823,0,0,0,0,0,0,0,0,0,0,0,5490,9843,9960,9960,9960,9960,9960,9960,9960,9960,9960,9960,9960,9960,9960,9960,7411,901,0,0,0,0,0,0,0,0,0,0,8862,9960,8156,7803,7803,7803,7803,5450,2392,2392,2392,2392,2392,5019,8705,9960,9960,7411,823,0,0,0,0,0,0,0,0,0,1490,3215,509,0,0,0,0,0,0,0,0,0,0,0,1333,8352,9960,9960,4509,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3294,9960,9960,9176,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3294,9960,9960,9176,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4156,6156,9960,9960,9529,2000,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,980,4588,8941,8941,8941,9921,9960,9960,9960,9960,9411,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2666,4666,8627,9960,9960,9960,9960,9960,9960,9960,9960,9960,5568,0,0,0,0,0,0,0,0,0,0,0,0,0,1450,7333,9921,9960,9960,9960,8745,8078,8078,2941,2666,8431,9960,9960,4588,0,0,0,0,0,0,0,0,0,0,0,0,4431,8588,9960,9490,8901,4509,3490,1215,0,0,0,0,7843,9960,9450,1607,0,0,0,0,0,0,0,0,0,0,0,0,6627,9960,6901,2431,0,0,0,0,0,0,0,1882,9058,9960,9176,0,0,0,0,0,0,0,0,0,0,0,0,0,705,4862,0,0,0,0,0,0,0,0,0,3294,9960,9960,6509,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5450,9960,9333,2235,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8235,9803,9960,6588,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9490,9960,9372,2235,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3490,9843,9450,3372,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,196,8078,9647,6156,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,156,4588,2705,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
#define IMG0_LABEL 7
#define IMG1 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1215,5176,9960,9921,9960,8352,3215,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,823,5568,9137,9882,9921,9882,9921,9882,8745,784,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4823,9960,9921,9960,9921,8784,7960,7960,8745,10000,8352,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7960,9921,9882,9921,8313,784,0,0,2392,9921,9882,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1607,9529,8784,7960,7176,1607,5960,1176,0,0,10000,9921,4000,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1568,784,0,0,4000,9921,1960,0,3215,9921,9882,784,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3215,8392,1215,4431,9137,9960,9137,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2431,4000,3215,1607,9921,9098,9921,9882,9137,1960,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5960,9921,9960,9921,9960,9921,9960,9137,4823,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5960,9882,9921,9882,9921,9882,7529,1960,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2431,7176,7960,9529,9960,9921,2431,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1568,6745,9882,7960,784,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,823,0,0,0,0,0,0,0,0,0,7176,9960,4392,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2431,7960,6392,0,0,0,0,0,0,0,0,2392,9921,5921,0,0,0,0,0,0,0,0,0,0,0,0,0,823,8392,7529,0,0,0,0,0,0,0,0,431,8352,9960,5921,0,0,0,0,0,0,0,0,0,0,0,0,0,4000,9921,5921,0,0,0,0,0,0,0,1607,8352,9882,9921,4352,0,0,0,0,0,0,0,0,0,0,0,0,0,1607,10000,8352,3607,2000,0,0,1215,3607,6784,9921,9960,9921,5568,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6745,9882,9921,9882,7960,7960,9137,9882,9921,9882,9921,5098,784,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,823,7960,10000,9921,9960,9921,9960,9921,9568,7960,3215,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,784,5921,5921,9921,6705,5921,5921,1568,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
#define IMG1_LABEL 3
#define TOTAL_IMAGE 2
static const signed char label[] = {IMG0_LABEL, IMG1_LABEL};
static const int W3[] = {-3233,-4261,-6519,7906,-2210,371,3984,2212,7975,2549,3076,6250,-5895,2037,-647,-15660,-3767,-6443};
static const int B3[] = {-8293,-1409};
static const int W2[] = {70,2319,368,-1468,-1559,44,2732,-275,2340,-6354,-5564,-7705,1560,7101,2395,18201,-183,15745,17230,-5966,13997,3351,5684,3797,350,-1828,-322,69,-26,-57,2935,1379,3558,22,25,-226};
static const int B2[] = {-1165,-36};
static const int W1[] = {1579,-2264,212,255,87,-7751,-4159,-1258,-3963,3354,-6319,-287,-5066,-3574,-37807,-8261,1022,5711,-6256,669,5596,2537,-30759,-6959,2531,3173,-8664,1275,8313,2666,-25865,-5720,2974,1623,-9915,779,8913,1685,-25247,-5639,4167,-1080,-10229,2062,3988,-1602,-19185,-4837,573,-12573,-2311,10518,-7981,-16263,-2600,-764,-4646,7558,1318,2474,-15276,1636,-754,-585,-3385,13052,1444,4408,-3103,1541,53,-317,-1599,6612,832,2651,-384,1805,-73,541,-142,3065,61,2231,303,1269,-281,1118,353,468,-265,2645,699,1071,-303,952,2480,-3649,-1027,4960,400,209,-547,541,-718,-15381,-2356,15890,-230,-2493,187,804,519,6141,1578,1288,-8691,13761,4305,1347,-1497,4542,1307,2311,36,4274,2339,920,-602,4642,1039,2504,-532,2146,1169,2240,1263,2349,1277,3324,140,1063,1087,3052,3141,-716,1348,4541,546,745,973,2748,4866,-4363,-1358,5724,1359,-74,1185,2448,-3753,-4687,-5648,8657,3407,-1721,406,3630,895,-4989,4768,217,2856,6174,7059,-3063,-5705,-2069,419,2304,-1790,-237,1411,234,-6417,-699,-858,1646,-5346,-1016,-1311,1490,-6350,-989,-2324,1540,-4858,-1904,-2046,1062,-5291,-1735,-3627,1222,-3865,-2478,-2522,1026,-1450,-1011,437,-1715,-313,-3013,-940,698,-5491,6684,1457,-7375,7700,-4125,1011,528,2546,2275,-302,-832,-6638,5122,-1210,-3340,-750,-982,-160,-318,-4358,943,-498,-777,-1282,-896,-701,-107,-2909,-131,-397,-234,-1553,-520,-1477,-755,-2686,-352,-956,-154,-390,242,-1960,-1999,-2030,-426,-1877,-451,-101,1482,-1170,-4180,-2068,-240,-1578,-556,-903,4025,-3574,-7861,-799,1620,-1446,-2749,2683,6881,-641,1136,5998,12947,-12500,6082,1234,1580,-1750,342,3910,2319,-7568,2004,791,142,-1213,85,2689,570,-5261,1248,806,-385,-889,7,2863,-108,-3930,1114,656,-337,-745,-273,3002,29,-4795,-452,-102,393,-923,-1924,1540,2123,-3898,-3738,-2064,2920,-1299,-3604,-5544,4572,-3526,-9479,-12481,8870,3362,35,-2276,14563,5495,3839,-4119,3758,1768,381,1635,7051,550,6445,-2072,1461,1031,1971,1742,3657,-978,5229,-1845,-13,886,2418,1554,1722,-1053,3821,-3065,-1629,1154,3075,1586,-177,-502,2623,-12994,-2270,1085,7447,-3980,168,1006,-1635,-6495,-5674,179,9896,-13958,4412,1664,-1919};
static const int B1[] = {12019,-11770,21698,-19615};
static const int W[] = {5580,7870,-409,-1225,-4126,-360,1691,-1471,-164,-2805,187,629,4956,3393,-4754,-1405,-8827,1835,208,-1378,-522,6792,-2802,-3127,4441,-2610,-3221,10321,-1444,5221,575,-9654,-3051,1685,-5320,4268,-5434,146,2679,1592};
static const int B[] = {414,14614,715,-12774,8092,-16933,-2974,-1177,3292,6596};
*/
int mnist(void);
#endif //__MNIST_INT_H__

View File

@@ -1,35 +0,0 @@
#include "onnx.h"
void add(const int *input, // pointer to vector
const int *bias, // pointer to matrix
const uint16_t dim_vec, // length of the vector
int *output)
{
for (int i = 0; i < dim_vec; i++)
{
output[i] = input[i] + bias[i];
}
}
int* add_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
const char* bias = node->input[1];
int* B = onnx_graph_get_weights_by_name(graph, bias);
int64_t* shapeB = onnx_graph_get_dims_by_name(graph, bias);
if(shapeB == NULL)
{
return NULL;
}
int* output = (int*) malloc(sizeof(int)*shapeB[0]);
memset(output, 0, sizeof(int)*shapeB[0]);
add(input, B, shapeB[0], output);
memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);   /* Add keeps the input shape */
return output;
}

View File

@@ -1,113 +0,0 @@
#include "onnx.h"
void conv2D(const int *input, // input image
const uint16_t dim_im_in_x, // input image dimension x
const uint16_t dim_im_in_y, // input image dimension y
const uint16_t ch_im_in, // number of input image channels
const int *weight, // kernel weights
const uint16_t ch_im_out, // number of filters, i.e., output image channels
const uint16_t dim_kernel_x, // filter kernel size x
const uint16_t dim_kernel_y, // filter kernel size y
const uint16_t padding_x, // padding sizes x
const uint16_t padding_y, // padding sizes y
const uint16_t stride_x, // stride x
const uint16_t stride_y, // stride y
const int *bias, // bias
int *output, // output image
const uint16_t dim_im_out_x, // output image dimension x
const uint16_t dim_im_out_y // output image dimension y
)
{
int i, j, k, l, m, n;
int conv_out = 0;
int in_row, in_col;
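// Data layout is channel-last (NWHC): element (y, x, c) of the input lives at
// input[(y * dim_im_in_x + x) * ch_im_in + c], matching the W_INDEX/H_INDEX/C_INDEX
// macros in onnx.h; the weights are therefore transposed from NCWH before this call.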
// For each filter
for (i = 0; i < ch_im_out; i++)
{
// For each image dimension
for (j = 0; j < dim_im_out_y; j++)
{
for (k = 0; k < dim_im_out_x; k++)
{
conv_out = bias[i];
// For each kernel dimension
for (m = 0; m < dim_kernel_y; m++)
{
for (n = 0; n < dim_kernel_x; n++)
{
// if-for implementation
in_row = stride_y * j + m - padding_y;
in_col = stride_x * k + n - padding_x;
if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x)
{
// For each input channel
for (l = 0; l < ch_im_in; l++)
{
conv_out += input[(in_row * dim_im_in_x + in_col) * ch_im_in + l] *
weight[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_x + n) * ch_im_in +
l];
}
}
}
}
output[i + (j * dim_im_out_x + k) * ch_im_out] = conv_out;
}
}
}
}
int* conv2D_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
if(node == NULL)
{
// layer not found
return NULL;
}
const char* weight = node->input[1];
const char* bias = node->input[2];
// Get weight shape
int64_t* shapeW = onnx_graph_get_dims_by_name(graph, weight);
if(shapeW == NULL)
{
return NULL;
}
int64_t dimW = onnx_graph_get_dim_by_name(graph, weight);
if(dimW < 0)
{
return NULL;
}
// Get weights
// NCWH --> NWHC
int64_t permW_t[] = { 0, 2, 3, 1};
int* W = onnx_graph_get_weights_by_name(graph, weight);
if(W == NULL)
{
return NULL;
}
int* W_t = transpose(W, shapeW, dimW, permW_t);
// Get bias
int* B = onnx_graph_get_weights_by_name(graph, bias);
if(B == NULL)
{
return NULL;
}
int* output = (int*) malloc(sizeof(int)*shapeW[0]*shapeInput[W_INDEX]*shapeInput[H_INDEX]);
memset(output, 0, sizeof(int)*shapeW[0]*shapeInput[W_INDEX]*shapeInput[H_INDEX]);
conv2D(input, shapeInput[W_INDEX], shapeInput[H_INDEX], shapeW[1], W_t, shapeW[0], shapeW[2], shapeW[3], 1, 1, 1, 1, B, output, shapeInput[W_INDEX], shapeInput[H_INDEX]);
shapeOutput[W_INDEX] = shapeInput[W_INDEX];
shapeOutput[H_INDEX] = shapeInput[H_INDEX];
shapeOutput[C_INDEX] = shapeW[0];
free(W_t);
return output;
}

View File

@@ -1,19 +0,0 @@
#include "onnx.h"
void dense(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
const int *bias,
int *output) // output operand
{
for (int i = 0; i < num_of_rows; i++)
{
int ip_out = bias[i];
for (int j = 0; j < dim_vec; j++)
{
ip_out += input[j] * weight[i * dim_vec + j];
}
output[i] = ip_out;
}
}

View File

@@ -1,25 +0,0 @@
#include "onnx.h"
void onnx_tensor_info(const int* A, int64_t* shape, int64_t dim)
{
int elem = 1;
for(int i = 0; i < dim; i++)
{
elem = elem * shape[i];
}
printf("Array size: %d\n", elem);
for(int i = 0; i < elem; i++)
{
printf( "%f ", A[i] );
int split = 1;
for(int j = dim-1; j > 0; j--)
{
split = split * shape[j];
if( (i+1) % split == 0)
{
printf("\n");
}
}
}
}

View File

@@ -1,63 +0,0 @@
#include "onnx.h"
void matmul(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
int *output)
{
for (int i = 0; i < num_of_rows; i++)
{
int ip_out = 0;
for (int j = 0; j < dim_vec; j++)
{
ip_out += input[j] * weight[i * dim_vec + j];
}
output[i] = ip_out;
}
}
int* matmul_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
const char* weight = node->input[1];
int64_t* shapeW = onnx_graph_get_dims_by_name(graph, weight);
if(shapeW == NULL)
{
return NULL;
}
int64_t dimW = onnx_graph_get_dim_by_name(graph, weight);
if(dimW < 0)
{
return NULL;
}
//assert(shapeW[0] == shapeInput[1]);
int64_t permW_t[] = {1, 0};
int* W = onnx_graph_get_weights_by_name(graph, weight);
if(W == NULL)
{
return NULL;
}
int* W_t = transpose(W, shapeW, dimW, permW_t);
int* output = (int*) malloc(sizeof(int)*shapeW[1]);
if(output == NULL)
{
// No memory
return NULL;
}
memset(output, 0, sizeof(int)*shapeW[1]);
matmul(input, W_t, shapeW[0], shapeW[1], output);
shapeOutput[0] = shapeInput[0];
shapeOutput[1] = shapeW[1];
free(W_t);
return output;
}

View File

@@ -1,96 +0,0 @@
#include "onnx.h"
void maxpool(const int *input,
const uint16_t dim_im_in_x, // input image dimension x or W
const uint16_t dim_im_in_y, // input image dimension y or H
const uint16_t ch_im_in, // number of input image channels
const uint16_t dim_kernel_x, // window kernel size
const uint16_t dim_kernel_y, // window kernel size
const uint16_t padding_x, // padding sizes
const uint16_t padding_y, // padding sizes
const uint16_t stride_x, // stride
const uint16_t stride_y, // stride
const uint16_t dim_im_out_x, // output image dimension x or W
const uint16_t dim_im_out_y, // output image dimension y or H
int *output)
{
int16_t i_ch_in, i_x, i_y;
int16_t k_x, k_y;
for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++)
{
for (i_y = 0; i_y < dim_im_out_y; i_y++)
{
for (i_x = 0; i_x < dim_im_out_x; i_x++)
{
int max = INT32_MIN;
for (k_y = i_y * stride_y - padding_y; k_y < i_y * stride_y - padding_y + dim_kernel_y; k_y++)
{
for (k_x = i_x * stride_x - padding_x; k_x < i_x * stride_x - padding_x + dim_kernel_x; k_x++)
{
if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in_y && k_x < dim_im_in_x)
{
if (input[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in_x)] > max)
{
max = input[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in_x)];
}
}
}
}
output[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out_x)] = max;
}
}
}
}
int* maxpool_layer(Onnx__GraphProto* graph, int* input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
if(node == NULL)
{
// layer not found
return NULL;
}
uint16_t kernel_x = 1;
uint16_t kernel_y = 1;
uint16_t padding_x = 0;
uint16_t padding_y = 0;
uint16_t stride_x = 1;
uint16_t stride_y = 1;
for(int i = 0; i < node->n_attribute; i++)
{
if( strcmp(node->attribute[i]->name, "kernel_shape") == 0 )
{
kernel_x = node->attribute[i]->ints[0];
kernel_y = node->attribute[i]->ints[1];
}
if( strcmp(node->attribute[i]->name, "strides") == 0 )
{
stride_x = node->attribute[i]->ints[0];
stride_y = node->attribute[i]->ints[1];
}
}
uint16_t out_x = (shapeInput[W_INDEX] - kernel_x + 2 * padding_x) / stride_x + 1;
uint16_t out_y = (shapeInput[H_INDEX] - kernel_y + 2 * padding_y) / stride_y + 1;
int* output = (int*) malloc(sizeof(int)*out_x*out_y*shapeInput[C_INDEX]);
if(output == NULL)
{
// No memory
return NULL;
}
memset(output, 0, sizeof(int)*out_x*out_y*shapeInput[C_INDEX]);
maxpool(input, shapeInput[W_INDEX], shapeInput[H_INDEX], shapeInput[C_INDEX], kernel_x, kernel_y, padding_x, padding_y, stride_x, stride_y, out_x, out_y, output);
shapeOutput[W_INDEX] = out_x;
shapeOutput[H_INDEX] = out_y;
shapeOutput[C_INDEX] = shapeInput[C_INDEX];
return output;
}

View File

@@ -1,84 +0,0 @@
#include <inttypes.h>
#include "onnx.h"
int* onnx_model_run(Onnx__ModelProto* model, int* input, int64_t* shapeInput)
{
int64_t* shapeOutput = (int64_t*) malloc(sizeof(int64_t)*3);
shapeOutput[0] = -1; shapeOutput[1] = -1; shapeOutput[2] = -1;
Onnx__NodeProto* node = onnx_graph_get_node_by_input(model->graph, model->graph->input[0]->name);
int i = 0;
int* output;
while(node != NULL)
{
printf("[%2d] %-10s %-20s ", i++, node->op_type, node->name);
if(strcmp(node->op_type, "Conv") == 0)
{
output = conv2D_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Relu") == 0)
{
output = relu_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "MaxPool") == 0)
{
output = maxpool_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Softmax") == 0)
{
output = softmax_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "MatMul") == 0)
{
output = matmul_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Add") == 0)
{
output = add_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Identity") == 0)
{
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
printf("\n");
continue;
}
else if(strcmp(node->op_type, "Transpose") == 0)
{
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
printf("\n");
continue;
}
else if(strcmp(node->op_type, "Reshape") == 0)
{
shapeOutput[1] = shapeOutput[0] * shapeOutput[1] * shapeOutput[2];
shapeOutput[2] = 1;
shapeOutput[0] = 1;
printf("[%2" PRId64 ", %2" PRId64 ", %2" PRId64 "] --> [%2" PRId64 ", %2" PRId64 ", %2" PRId64 "]\n", shapeInput[0], shapeInput[1], shapeInput[2], shapeOutput[0], shapeOutput[1], shapeOutput[2]);
// free(input);
// input = output;
memcpy(shapeInput, shapeOutput, sizeof(int64_t)*3);
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
continue;
}
else
{
printf("Unsupported operand: %s\n", node->op_type);
}
printf("[%2" PRId64 ", %2" PRId64 ", %2" PRId64 "] --> [%2" PRId64 ", %2" PRId64 ", %2" PRId64 "]\n", shapeInput[0], shapeInput[1], shapeInput[2], shapeOutput[0], shapeOutput[1], shapeOutput[2]);
free(input);
input = output;
memcpy(shapeInput, shapeOutput, sizeof(int64_t)*3);
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
}
output = input;
free(shapeOutput);
return output;
}

View File

@@ -1,284 +0,0 @@
#include "onnx-parser.h"
const char* onnx_tensor_proto_data_type[] = {
"Undefined",
"FLOAT",
"UINT8",
"INT8",
"UINT16",
"INT16",
"INT32",
"INT64",
"STRING",
"BOOL",
"FLOAT16",
"DOUBLE",
"UINT32",
"UINT64",
"COMPLEX64",
"COMPLEX128"
};
/*
Onnx__ModelProto* onnx_load_model(const char* onnx_file_name)
{
unsigned char* buffer;
FILE *fp;
// Get File Size
fp = fopen(onnx_file_name,"rb");
fseek(fp, 0L, SEEK_END);
int sz = ftell(fp);
fseek(fp, 0L, SEEK_SET);
// printf("File size %s is %d\n", onnx_file_name, sz);
// Read File
buffer = (unsigned char*) malloc(sizeof(unsigned char) * sz);
if(buffer == NULL)
{
printf("Failed to malloc %d bytes memory for %s\n", sz, onnx_file_name);
return NULL;
}
fread(buffer, sz, 1, fp);
Onnx__ModelProto* model = onnx__model_proto__unpack(NULL, sz, buffer);
free(buffer);
fclose(fp);
return model;
}
*/
void onnx_model_info(Onnx__ModelProto* model)
{
printf("---- Model info ----\n");
printf("IR Version is %ld\n", model->ir_version);
printf("Produceer name is %s\n", model->producer_name);
printf("Produceer version is %s\n", model->producer_version);
printf("Produceer version is %s\n", model->domain);
}
void onnx_graph_info(Onnx__GraphProto* graph)
{
printf("---- Graph Info ----\n");
// Input
printf("---- Graph Input Info ----\n");
printf("Graph inputs number: %ld\n", graph->n_input);
for(int i = 0; i < graph->n_input; i++)
{
onnx_graph_input_info(graph->input[i]);
}
// Output
printf("---- Graph Output Info ----\n");
printf("Graph outputs number: %ld\n", graph->n_output);
for(int i = 0; i < graph->n_output; i++)
{
onnx_graph_output_info(graph->output[i]);
}
// Nodes
printf("---- Graph Node Info ----\n");
printf("Graph nodes number: %ld\n", graph->n_node);
for(int i = 0; i < graph->n_node; i++)
{
onnx_graph_node_info(graph->node[i]);
}
}
void onnx_graph_info_sorted(Onnx__GraphProto* graph)
{
printf("---- Graph Info ----\n");
// Input
printf("---- Graph Input Info ----\n");
printf("Graph inputs number: %ld\n", graph->n_input);
for(int i = 0; i < graph->n_input; i++)
{
onnx_graph_input_info(graph->input[i]);
}
// Output
printf("---- Graph Output Info ----\n");
printf("Graph outputs number: %ld\n", graph->n_output);
for(int i = 0; i < graph->n_output; i++)
{
onnx_graph_output_info(graph->output[i]);
}
// Nodes
printf("---- Graph Node Info ----\n");
printf("Graph nodes number: %ld\n", graph->n_node);
Onnx__NodeProto* node = onnx_graph_get_node_by_input(graph, graph->input[0]->name);
while(node != NULL)
{
onnx_graph_node_info(node);
node = onnx_graph_get_node_by_input(graph, node->output[0]);
}
}
void onnx_graph_input_info(Onnx__ValueInfoProto* input)
{
printf("Input name %s\n", input->name);
Onnx__TypeProto* type = input->type;
Onnx__TypeProto__Tensor* tensor_type = type->tensor_type;
Onnx__TensorShapeProto* shape = tensor_type->shape;
printf("Input type %s\n", onnx_tensor_proto_data_type[tensor_type->elem_type]);
printf("Input dimension %ld\n", shape->n_dim);
for(int i = 0; i < shape->n_dim; i++)
{
onnx_graph_value_tensor_shape_dimension_info(shape->dim[i]);
if( i != shape->n_dim - 1)
{
printf(" x ");
}
}
printf("\n");
}
void onnx_graph_value_tensor_shape_dimension_info(Onnx__TensorShapeProto__Dimension* dim)
{
switch (dim->value_case)
{
case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE__NOT_SET:
printf("?");
break;
case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_VALUE:
printf("%ld",dim->dim_value);
break;
case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_PARAM:
printf("%s",dim->dim_param);
break;
default:
printf("?");
break;
}
}
void onnx_graph_output_info(Onnx__ValueInfoProto* output)
{
printf("Output name %s\n", output->name);
Onnx__TypeProto* type = output->type;
Onnx__TypeProto__Tensor* tensor_type = type->tensor_type;
Onnx__TensorShapeProto* shape = tensor_type->shape;
printf("Output type %s\n", onnx_tensor_proto_data_type[tensor_type->elem_type]);
printf("Output dimension %ld\n", shape->n_dim);
for(int i = 0; i < shape->n_dim; i++)
{
onnx_graph_value_tensor_shape_dimension_info(shape->dim[i]);
if( i != shape->n_dim - 1)
{
printf(" x ");
}
}
printf("\n");
}
void onnx_graph_initializer_info(Onnx__TensorProto* initializer)
{
printf("%s: [", initializer->name);
for(int i = 0; i < initializer->n_dims; i++)
{
printf("%ld, ", initializer->dims[i]);
}
printf("]\n");
printf("%s: [", initializer->name);
for(int i = 0; i < initializer->n_float_data; i++)
{
printf("%f, ", initializer->float_data[i]);
}
printf("]\n");
}
Onnx__NodeProto* onnx_graph_get_node_by_name(Onnx__GraphProto* graph, const char* node_name)
{
for(int i = 0; i < graph->n_node; i++)
{
Onnx__NodeProto* node = graph->node[i];
/* compare the node name directly; looping over the inputs here would skip nodes without inputs */
if( strcmp(node->name, node_name) == 0)
{
return node;
}
}
return NULL;
}
Onnx__NodeProto* onnx_graph_get_node_by_input(Onnx__GraphProto* graph, const char* input_name)
{
for(int i = 0; i < graph->n_node; i++)
{
Onnx__NodeProto* node = graph->node[i];
for(int j = 0; j < node->n_input; j++)
{
if( strcmp(node->input[j], input_name) == 0)
{
return node;
}
}
}
return NULL;
}
int* onnx_graph_get_weights_by_name(Onnx__GraphProto* graph, const char* node_name)
{
Onnx__TensorProto** initializer = graph->initializer;
for(int i = 0; i < graph->n_initializer; i++)
{
if( strcmp(graph->initializer[i]->name, node_name) == 0)
{
return graph->initializer[i]->float_data;
}
}
return NULL;
}
int64_t* onnx_graph_get_dims_by_name(Onnx__GraphProto* graph, const char* node_name)
{
Onnx__TensorProto** initializer = graph->initializer;
for(int i = 0; i < graph->n_initializer; i++)
{
if( strcmp(graph->initializer[i]->name, node_name) == 0)
{
return graph->initializer[i]->dims;
}
}
return NULL;
}
int64_t onnx_graph_get_dim_by_name(Onnx__GraphProto* graph, const char* node_name)
{
Onnx__TensorProto** initializer = graph->initializer;
for(int i = 0; i < graph->n_initializer; i++)
{
if( strcmp(graph->initializer[i]->name, node_name) == 0)
{
return graph->initializer[i]->n_dims;
}
}
return -1;
}
void onnx_graph_node_info(Onnx__NodeProto* node)
{
printf("%-12s: %-30s -> %-30s [%s]\n", node->op_type, node->input[0], node->output[0], node->name);
}

View File

@@ -1,27 +0,0 @@
#ifndef __ONNX_PARSER_H__
#define __ONNX_PARSER_H__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "onnx.pb-c.h"
Onnx__ModelProto* onnx_load_model(const char* onnx_file_name);
void onnx_model_info(Onnx__ModelProto* model);
void onnx_graph_info(Onnx__GraphProto* graph);
void onnx_graph_info_sorted(Onnx__GraphProto* graph);
void onnx_graph_input_info(Onnx__ValueInfoProto* input);
void onnx_graph_output_info(Onnx__ValueInfoProto* output);
void onnx_graph_node_info(Onnx__NodeProto* node);
void onnx_graph_initializer_info(Onnx__TensorProto* initializer);
Onnx__NodeProto* onnx_graph_get_node_by_name(Onnx__GraphProto* graph, const char* node_name);
Onnx__NodeProto* onnx_graph_get_node_by_input(Onnx__GraphProto* graph, const char* input_name);
int64_t* onnx_graph_get_dims_by_name(Onnx__GraphProto* graph, const char* node_name);
int64_t onnx_graph_get_dim_by_name(Onnx__GraphProto* graph, const char* node_name);
int* onnx_graph_get_weights_by_name(Onnx__GraphProto* graph, const char* node_name);
void onnx_graph_value_tensor_shape_dimension_info(Onnx__TensorShapeProto__Dimension* dim);
#endif //__ONNX_PARSER_H__

View File

@@ -1,95 +0,0 @@
#ifndef __ONNX_H__
#define __ONNX_H__
#include <stdio.h>
#include <stdint.h>
#include <string.h>
//#include <int.h>
#include <math.h>
#include <onnx-parser.h>
#define ONNX_USE_NWHC
#ifdef ONNX_USE_NWHC
// NWHC
#define W_INDEX 0
#define H_INDEX 1
#define C_INDEX 2
#else
// NCWH
#define C_INDEX 0
#define W_INDEX 1
#define H_INDEX 2
#endif
// Model
void onnx_tensor_info(const int* A, int64_t* shape, int64_t dim);
int* onnx_model_run(Onnx__ModelProto* model, int* input, int64_t* shapeInput);
// Layers
int* conv2D_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* relu_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* maxpool_layer(Onnx__GraphProto* graph, int* input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* matmul_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* add_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* softmax_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
// Operators
int* transpose(const int* A, int64_t* shape, int64_t dim, int64_t* perm);
void conv2D(const int *input, // input image
const uint16_t dim_im_in_x, // input image dimension x
const uint16_t dim_im_in_y, // input image dimension y
const uint16_t ch_im_in, // number of input image channels
const int *weight, // kernel weights
const uint16_t ch_im_out, // number of filters, i.e., output image channels
const uint16_t dim_kernel_x, // filter kernel size x
const uint16_t dim_kernel_y, // filter kernel size y
const uint16_t padding_x, // padding sizes x
const uint16_t padding_y, // padding sizes y
const uint16_t stride_x, // stride x
const uint16_t stride_y, // stride y
const int *bias, // bias
int *output, // output image
const uint16_t dim_im_out_x, // output image dimension x
const uint16_t dim_im_out_y // output image dimension y
);
void relu(const int *input, uint32_t size, int* output);
void maxpool(const int *input,
const uint16_t dim_im_in_x, // input image dimension x or W
const uint16_t dim_im_in_y, // input image dimension y or H
const uint16_t ch_im_in, // number of input image channels
const uint16_t dim_kernel_x, // window kernel size
const uint16_t dim_kernel_y, // window kernel size
const uint16_t padding_x, // padding sizes
const uint16_t padding_y, // padding sizes
const uint16_t stride_x, // stride
const uint16_t stride_y, // stride
const uint16_t dim_im_out_x, // output image dimension x or W
const uint16_t dim_im_out_y, // output image dimension y or H
int *output);
void matmul(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
int *output);
void add(const int *input, // pointer to vector
const int *bias, // pointer to matrix
const uint16_t dim_vec, // length of the vector
int *output);
void dense(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
const int *bias,
int *output);
void softmax(const int *input, const uint32_t dim_vec, int *output);
#endif // __ONNX_H__

View File

@@ -1,27 +0,0 @@
#include "onnx.h"
void relu(const int *input, uint32_t size, int* output)
{
uint32_t i;
memcpy(output, input, sizeof(int) * size);
for (i = 0; i < size; i++)
{
if (output[i] < 0)
output[i] = 0;
}
}
int* relu_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
int64_t len = shapeInput[0] * shapeInput[1] * shapeInput[2];
int* output = (int*) malloc(sizeof(int)*len);
memset(output, 0, sizeof(int)*len);
relu(input, len, output);
memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);   /* ReLU keeps the input shape */
return output;
}

View File

@@ -1,57 +0,0 @@
#include "onnx.h"
//#include<math.h>
int abs_core(int x)
{
return x > 0?x:-x;
}
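// Unused, rough fixed-point approximation of exp() by repeated squaring;
// softmax() below does not call it.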
int exp_core(int x)
{
x = 1 + (x << 8);
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
return x;
}
void softmax(const int *input, const uint32_t dim_vec, int *output)
{
long long sum = 0;
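// Integer stand-in for softmax: the logits are only scaled down by 2^16,
// so the relative ordering of the scores is preserved.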
for(int i = 0; i < dim_vec; i++)
{
output[i] = input[i] >> 16;
}
/*
for(int i = 0; i < dim_vec; i++)
{
output[i] = abs_core(input[i] >> 16);
sum = sum + (output[i]);
}
printf("sum = %ld\r\n" , sum);
for(int i = 0; i < dim_vec; i++)
{
//output[i] = output[i] / (sum);
output[i] = sum / output[i];
//output[i] = output[i];
}*/
}
int* softmax_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" && shapeInput[1] > 0);
int* output = (int*) malloc(sizeof(int)*shapeInput[1]);
memset(output, 0, sizeof(int)*shapeInput[1]);
softmax(input, shapeInput[1], output);
memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);   /* Softmax keeps the input shape */
return output;
}

View File

@@ -1,26 +0,0 @@
#include "tos_k.h"
#ifdef __CC_ARM
/* avoid the heap and heap-using library functions supplied by arm */
#pragma import(__use_no_heap)
#endif
void *malloc(size_t n)
{
return tos_mmheap_alloc(n);
}
void *realloc(void *rmem, size_t newsize)
{
return tos_mmheap_realloc(rmem, newsize);
}
void *calloc(size_t nelem, size_t elsize)
{
return tos_mmheap_calloc(nelem, elsize);
}
void free(void *rmem)
{
tos_mmheap_free(rmem);
}

View File

@@ -1,97 +0,0 @@
#include "onnx.h"
int* transpose(const int* A, int64_t* shape, int64_t dim, int64_t* perm)
{
// Get array size
int elem = 1;
for(int i = 0; i < dim; i++)
{
elem = elem * shape[i];
}
// Malloc memory for B
int* B = malloc(sizeof(int) * elem);
if(B == NULL)
{
return NULL;
}
// Malloc memory for shapeB
int* shapeB = malloc(sizeof(int) * dim);
if( shapeB == NULL)
{
free(B);
return NULL;
}
for(int i = 0; i < dim; i++)
{
shapeB[i] = shape[perm[i]];
}
// Transpose
for(int src = 0; src < elem; src++)
{
// Get transposed B array
// A[1][0][3] -> B[3][1][0]
int temp = src;
int* indexA = malloc(sizeof(int) * dim);
if(indexA == NULL)
{
free(shapeB);
free(B);
return NULL;
}
int* indexB = malloc(sizeof(int) * dim);
if(indexB == NULL)
{
free(indexA);
free(shapeB);
free(B);
return NULL;
}
for(int i = dim-1; i >= 0; i--)
{
indexA[i] = temp % shape[i];
temp = temp / shape[i];
}
for(int i = 0; i < dim; i++)
{
indexB[i] = indexA[perm[i]];
}
// Get transposed B index
// A[1][0][3] -> B[3][1][0]
int dst = 0;
temp = 1;
for(int i = dim - 1; i >= 0; i--)
{
dst = dst + indexB[i] * temp;
temp = temp * shapeB[i];
}
B[dst] = A[src];
free(indexA);
free(indexB);
}
free(shapeB);
return B;
}
int* transpose_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
//assert(graph != NULL && input != NULL && layer_name != "" );
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
if(node == NULL)
{
return NULL;
}
int64_t perm_t[3];
int64_t* perm = node->attribute[0]->ints;
perm_t[0] = perm[1] - 1;
perm_t[1] = perm[2] - 1;
perm_t[2] = perm[3] - 1;
int* output = transpose(input, shapeInput, 3, perm_t);
shapeOutput[0] = shapeInput[perm_t[0]];
shapeOutput[1] = shapeInput[perm_t[1]];
shapeOutput[2] = shapeInput[perm_t[2]];
return output;
}

Binary file not shown.


View File

@@ -1,26 +0,0 @@
#include "tos_k.h"
#ifdef __CC_ARM
/* avoid the heap and heap-using library functions supplied by arm */
#pragma import(__use_no_heap)
#endif
void *malloc(size_t n)
{
return tos_mmheap_alloc(n);
}
void *realloc(void *rmem, size_t newsize)
{
return tos_mmheap_realloc(rmem, newsize);
}
void *calloc(size_t nelem, size_t elsize)
{
return tos_mmheap_calloc(nelem, elsize);
}
void free(void *rmem)
{
tos_mmheap_free(rmem);
}

File diff suppressed because it is too large

View File

@@ -1,981 +0,0 @@
/* Generated by the protocol buffer compiler. DO NOT EDIT! */
/* Generated from: src/onnx.proto */
#ifndef PROTOBUF_C_src_2fonnx_2eproto__INCLUDED
#define PROTOBUF_C_src_2fonnx_2eproto__INCLUDED
#include <protobuf-c.h>
PROTOBUF_C__BEGIN_DECLS
#if PROTOBUF_C_VERSION_NUMBER < 1000000
# error This file was generated by a newer version of protoc-c which is incompatible with your libprotobuf-c headers. Please update your headers.
#elif 1002001 < PROTOBUF_C_MIN_COMPILER_VERSION
# error This file was generated by an older version of protoc-c which is incompatible with your libprotobuf-c headers. Please regenerate this file with a newer version of protoc-c.
#endif
typedef struct _Onnx__AttributeProto Onnx__AttributeProto;
typedef struct _Onnx__ValueInfoProto Onnx__ValueInfoProto;
typedef struct _Onnx__NodeProto Onnx__NodeProto;
typedef struct _Onnx__ModelProto Onnx__ModelProto;
typedef struct _Onnx__StringStringEntryProto Onnx__StringStringEntryProto;
typedef struct _Onnx__GraphProto Onnx__GraphProto;
typedef struct _Onnx__TensorProto Onnx__TensorProto;
typedef struct _Onnx__TensorProto__Segment Onnx__TensorProto__Segment;
typedef struct _Onnx__TensorShapeProto Onnx__TensorShapeProto;
typedef struct _Onnx__TensorShapeProto__Dimension Onnx__TensorShapeProto__Dimension;
typedef struct _Onnx__TypeProto Onnx__TypeProto;
typedef struct _Onnx__TypeProto__Tensor Onnx__TypeProto__Tensor;
typedef struct _Onnx__OperatorSetIdProto Onnx__OperatorSetIdProto;
/* --- enums --- */
/*
* Note: this enum is structurally identical to the OpSchema::AttrType
* enum defined in schema.h. If you rev one, you likely need to rev the other.
*/
typedef enum _Onnx__AttributeProto__AttributeType {
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__UNDEFINED = 0,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOAT = 1,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INT = 2,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRING = 3,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSOR = 4,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPH = 5,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOATS = 6,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS = 7,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRINGS = 8,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSORS = 9,
ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPHS = 10
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE)
} Onnx__AttributeProto__AttributeType;
typedef enum _Onnx__TensorProto__DataType {
ONNX__TENSOR_PROTO__DATA_TYPE__UNDEFINED = 0,
/*
* Basic types.
*/
/*
* int
*/
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT = 1,
/*
* uint8_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__UINT8 = 2,
/*
* int8_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__INT8 = 3,
/*
* uint16_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__UINT16 = 4,
/*
* int16_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__INT16 = 5,
/*
* int32_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__INT32 = 6,
/*
* int64_t
*/
ONNX__TENSOR_PROTO__DATA_TYPE__INT64 = 7,
/*
* string
*/
ONNX__TENSOR_PROTO__DATA_TYPE__STRING = 8,
/*
* bool
*/
ONNX__TENSOR_PROTO__DATA_TYPE__BOOL = 9,
/*
* Advanced types
*/
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16 = 10,
ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE = 11,
ONNX__TENSOR_PROTO__DATA_TYPE__UINT32 = 12,
ONNX__TENSOR_PROTO__DATA_TYPE__UINT64 = 13,
/*
* complex with float32 real and imaginary components
*/
ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX64 = 14,
/*
* complex with float64 real and imaginary components
*/
ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX128 = 15
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(ONNX__TENSOR_PROTO__DATA_TYPE)
} Onnx__TensorProto__DataType;
/*
* Versioning
* ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
* To be compatible with both proto2 and proto3, we will use a version number
* that is not defined by the default value but an explicit enum number.
*/
typedef enum _Onnx__Version {
/*
* proto3 requires the first enum value to be zero.
* We add this just to appease the compiler.
*/
ONNX__VERSION___START_VERSION = 0,
/*
* The version field is always serialized and we will use it to store the
* version that the graph is generated from. This helps us set up version
* control.
* For the IR, we are using simple numbers starting with 0x00000001,
* which was the version we published on Oct 10, 2017.
*/
ONNX__VERSION__IR_VERSION_2017_10_10 = 1,
/*
* IR_VERSION 2 published on Oct 30, 2017
* - Added type discriminator to AttributeProto to support proto3 users
*/
ONNX__VERSION__IR_VERSION_2017_10_30 = 2,
/*
* IR VERSION 3 published on Nov 3, 2017
* - For operator versioning:
* - Added new message OperatorSetIdProto
* - Added opset_import in ModelProto
* - For vendor extensions, added domain in NodeProto
*/
ONNX__VERSION__IR_VERSION = 3
PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(ONNX__VERSION)
} Onnx__Version;
/* --- messages --- */
/*
* Attributes
* A named attribute containing either singular int, integer, string, graph,
* and tensor values, or repeated int, integer, string, graph, and tensor values.
* An AttributeProto MUST contain the name field, and *only one* of the
* following content fields, effectively enforcing a C/C++ union equivalent.
*/
struct _Onnx__AttributeProto
{
ProtobufCMessage base;
/*
* The name field MUST be present for this version of the IR.
*/
/*
* namespace Attribute
*/
char *name;
/*
* if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
* In this case, this AttributeProto does not contain data, and it's a reference of attribute
* in parent scope.
* NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
*/
char *ref_attr_name;
/*
* A human-readable documentation for this attribute. Markdown is allowed.
*/
char *doc_string;
/*
* The type field MUST be present for this version of the IR.
* For 0.0.1 versions of the IR, this field was not defined, and
* implementations needed to use has_field heuristics to determine
* which value field was in use. For IR_VERSION 0.0.2 or later, this
* field MUST be set and match the f|i|s|t|... field in use. This
* change was made to accommodate proto3 implementations.
*/
/*
* discriminator that indicates which field below is in use
*/
protobuf_c_boolean has_type;
Onnx__AttributeProto__AttributeType type;
/*
* Exactly ONE of the following fields must be present for this version of the IR
*/
/*
* int
*/
protobuf_c_boolean has_f;
int f;
/*
* int
*/
protobuf_c_boolean has_i;
int64_t i;
/*
* UTF-8 string
*/
protobuf_c_boolean has_s;
ProtobufCBinaryData s;
/*
* tensor value
*/
Onnx__TensorProto *t;
/*
* graph
*/
Onnx__GraphProto *g;
/*
* list of floats
*/
size_t n_floats;
int *floats;
/*
* list of ints
*/
size_t n_ints;
int64_t *ints;
/*
* list of UTF-8 strings
*/
size_t n_strings;
ProtobufCBinaryData *strings;
/*
* list of tensors
*/
size_t n_tensors;
Onnx__TensorProto **tensors;
/*
* list of graph
*/
size_t n_graphs;
Onnx__GraphProto **graphs;
};
#define ONNX__ATTRIBUTE_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__attribute_proto__descriptor) \
, NULL, NULL, NULL, 0,0, 0,0, 0,0, 0,{0,NULL}, NULL, NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL }
/*
* Defines information on value, including the name, the type, and
* the shape of the value.
*/
struct _Onnx__ValueInfoProto
{
ProtobufCMessage base;
/*
* This field MUST be present in this version of the IR.
*/
/*
* namespace Value
*/
char *name;
/*
* This field MUST be present in this version of the IR.
*/
Onnx__TypeProto *type;
/*
* A human-readable documentation for this value. Markdown is allowed.
*/
char *doc_string;
};
#define ONNX__VALUE_INFO_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__value_info_proto__descriptor) \
, NULL, NULL, NULL }
/*
* Nodes
* Computation graphs are made up of a DAG of nodes, which represent what is
* commonly called a "layer" or "pipeline stage" in machine learning frameworks.
* For example, it can be a node of type "Conv" that takes in an image, a filter
* tensor and a bias tensor, and produces the convolved output.
*/
struct _Onnx__NodeProto
{
ProtobufCMessage base;
/*
* namespace Value
*/
size_t n_input;
char **input;
/*
* namespace Value
*/
size_t n_output;
char **output;
/*
* An optional identifier for this node in a graph.
* This field MAY be absent in this version of the IR.
*/
/*
* namespace Node
*/
char *name;
/*
* The symbolic identifier of the Operator to execute.
*/
/*
* namespace Operator
*/
char *op_type;
/*
* The domain of the OperatorSet that specifies the operator named by op_type.
*/
/*
* namespace Domain
*/
char *domain;
/*
* Additional named attributes.
*/
size_t n_attribute;
Onnx__AttributeProto **attribute;
/*
* A human-readable documentation for this node. Markdown is allowed.
*/
char *doc_string;
};
#define ONNX__NODE_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__node_proto__descriptor) \
, 0,NULL, 0,NULL, NULL, NULL, NULL, 0,NULL, NULL }
/*
* Models
* ModelProto is a top-level file/container format for bundling a ML model and
* associating its computation graph with metadata.
* The semantics of the model are described by the associated GraphProto.
*/
struct _Onnx__ModelProto
{
ProtobufCMessage base;
/*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*/
protobuf_c_boolean has_ir_version;
int64_t ir_version;
/*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*/
size_t n_opset_import;
Onnx__OperatorSetIdProto **opset_import;
/*
* The name of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*/
char *producer_name;
/*
* The version of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*/
char *producer_version;
/*
* Domain name of the model.
* We use reverse domain names as name space indicators. For example:
* `com.facebook.fair` or `com.microsoft.cognitiveservices`
* Together with `model_version` and GraphProto.name, this forms the unique identity of
* the graph.
*/
char *domain;
/*
* The version of the graph encoded. See Version enum below.
*/
protobuf_c_boolean has_model_version;
int64_t model_version;
/*
* A human-readable documentation for this model. Markdown is allowed.
*/
char *doc_string;
/*
* The parameterized graph that is evaluated to execute the model.
*/
Onnx__GraphProto *graph;
/*
* Named metadata values; keys should be distinct.
*/
size_t n_metadata_props;
Onnx__StringStringEntryProto **metadata_props;
};
#define ONNX__MODEL_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__model_proto__descriptor) \
, 0,0, 0,NULL, NULL, NULL, NULL, 0,0, NULL, NULL, 0,NULL }
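/*
 * Illustrative sketch (not part of the generated header): a minimal sanity
 * check on an already-unpacked model, reading only scalar fields of
 * Onnx__ModelProto. The minimum IR version of 3 is an assumption made for
 * this example, not a requirement imposed by this port.
 */
static int onnx_example_model_is_usable(const Onnx__ModelProto *model)
{
    if (model == NULL || model->graph == NULL)
    {
        return 0; /* nothing to execute */
    }
    /* ir_version is optional on the wire: check has_ir_version before use. */
    if (!model->has_ir_version || model->ir_version < 3)
    {
        return 0;
    }
    /* Per the comment above, at least one opset import must be present. */
    return model->n_opset_import > 0;
}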
/*
* StringStringEntryProto follows the pattern for cross-proto-version maps.
* See https://developers.google.com/protocol-buffers/docs/proto3#maps
*/
struct _Onnx__StringStringEntryProto
{
ProtobufCMessage base;
char *key;
char *value;
};
#define ONNX__STRING_STRING_ENTRY_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__string_string_entry_proto__descriptor) \
, NULL, NULL }
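/*
 * Illustrative sketch (not part of the generated header): looking up one
 * metadata value by key in a metadata_props array as stored in
 * Onnx__ModelProto. Assumes <string.h> for strcmp(); returns NULL when the
 * key is absent.
 */
static const char *onnx_example_find_metadata(Onnx__StringStringEntryProto **props,
                                              size_t n_props, const char *key)
{
    size_t i;
    for (i = 0; i < n_props; i++)
    {
        if (props[i]->key != NULL && strcmp(props[i]->key, key) == 0)
        {
            return props[i]->value;
        }
    }
    return NULL;
}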
/*
* Graphs
* A graph defines the computational logic of a model and is comprised of a parameterized
* list of nodes that form a directed acyclic graph based on their inputs and outputs.
* This is the equivalent of the "network" or "graph" in many deep learning
* frameworks.
*/
struct _Onnx__GraphProto
{
ProtobufCMessage base;
/*
* The nodes in the graph, sorted topologically.
*/
size_t n_node;
Onnx__NodeProto **node;
/*
* The name of the graph.
*/
/*
* namespace Graph
*/
char *name;
/*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each TensorProto entry must have a distinct name (within the list) that
* also appears in the input list.
*/
size_t n_initializer;
Onnx__TensorProto **initializer;
/*
* A human-readable documentation for this graph. Markdown is allowed.
*/
char *doc_string;
/*
* The inputs and outputs of the graph.
*/
size_t n_input;
Onnx__ValueInfoProto **input;
size_t n_output;
Onnx__ValueInfoProto **output;
/*
 * Information for the values in the graph. The ValueInfoProto.name values
 * must be distinct. It is optional for a value to appear in the value_info list.
*/
size_t n_value_info;
Onnx__ValueInfoProto **value_info;
};
#define ONNX__GRAPH_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__graph_proto__descriptor) \
, 0,NULL, NULL, 0,NULL, NULL, 0,NULL, 0,NULL, 0,NULL }
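/*
 * Illustrative sketch (not part of the generated header): because graph->node
 * is sorted topologically, an interpreter can execute the array in order. The
 * hypothetical helper below finds which node produces a given tensor name by
 * scanning node outputs, one simple way to wire a node input to an earlier
 * result. Assumes <string.h> for strcmp(); returns (size_t)-1 when no node
 * produces the name (it is then a graph input or an initializer).
 */
static size_t onnx_example_find_producer(const Onnx__GraphProto *graph, const char *name)
{
    size_t i, j;
    for (i = 0; i < graph->n_node; i++)
    {
        const Onnx__NodeProto *node = graph->node[i];
        for (j = 0; j < node->n_output; j++)
        {
            if (node->output[j] != NULL && strcmp(node->output[j], name) == 0)
            {
                return i;
            }
        }
    }
    return (size_t)-1;
}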
/*
* For very large tensors, we may want to store them in chunks, in which
* case the following fields will specify the segment that is stored in
* the current TensorProto.
*/
struct _Onnx__TensorProto__Segment
{
ProtobufCMessage base;
protobuf_c_boolean has_begin;
int64_t begin;
protobuf_c_boolean has_end;
int64_t end;
};
#define ONNX__TENSOR_PROTO__SEGMENT__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__tensor_proto__segment__descriptor) \
, 0,0, 0,0 }
/*
* Tensors
* A serialized tensor value.
*/
struct _Onnx__TensorProto
{
ProtobufCMessage base;
/*
* The shape of the tensor.
*/
size_t n_dims;
int64_t *dims;
/*
* The data type of the tensor.
*/
protobuf_c_boolean has_data_type;
Onnx__TensorProto__DataType data_type;
Onnx__TensorProto__Segment *segment;
/*
* For int and complex64 values
* Complex64 tensors are encoded as a single array of floats,
* with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0, 3.0, 4.0]).
* When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
*/
size_t n_float_data;
int *float_data;
/*
* For int32, uint8, int8, uint16, int16, bool, and float16 values
 * float16 values must be bit-wise converted to a uint16_t prior
 * to writing to the buffer.
 * When this field is present, the data_type field MUST be
 * INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16
*/
size_t n_int32_data;
int32_t *int32_data;
/*
* For strings.
* Each element of string_data is a UTF-8 encoded Unicode
* string. No trailing null, no leading BOM. The protobuf "string"
 * scalar type is not used, to match ML community conventions.
* When this field is present, the data_type field MUST be STRING
*/
size_t n_string_data;
ProtobufCBinaryData *string_data;
/*
* For int64.
* When this field is present, the data_type field MUST be INT64
*/
size_t n_int64_data;
int64_t *int64_data;
/*
* Optionally, a name for the tensor.
*/
/*
* namespace Value
*/
char *name;
/*
* A human-readable documentation for this tensor. Markdown is allowed.
*/
char *doc_string;
/*
* Serializations can either use one of the fields above, or use this
* raw bytes field. The only exception is the string case, where one is
* required to store the content in the repeated bytes string_data field.
 * When this raw_data field is used to store the tensor value, elements MUST
 * be stored in fixed-width, little-endian order.
* Floating-point data types MUST be stored in IEEE 754 format.
* Complex64 elements must be written as two consecutive FLOAT values, real component first.
* Complex128 elements must be written as two consecutive DOUBLE values, real component first.
* Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
 * Note: the advantage of a specific field rather than the raw_data field is
 * that in some cases (e.g. int data), protobuf does a better packing via
 * variable-length storage, and may lead to a smaller binary footprint.
* When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
*/
protobuf_c_boolean has_raw_data;
ProtobufCBinaryData raw_data;
/*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0, 3.0, 4.0]).
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*/
size_t n_double_data;
double *double_data;
/*
* For uint64 and uint32 values
* When this field is present, the data_type field MUST be
* UINT32 or UINT64
*/
size_t n_uint64_data;
uint64_t *uint64_data;
};
#define ONNX__TENSOR_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__tensor_proto__descriptor) \
, 0,NULL, 0,0, NULL, 0,NULL, 0,NULL, 0,NULL, 0,NULL, NULL, NULL, 0,{0,NULL}, 0,NULL, 0,NULL }
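/*
 * Illustrative sketch (not part of the generated header): computing the
 * element count from dims and selecting the storage field that is actually
 * populated. Note that in this port float_data is declared as an int array
 * (presumably to back the integer-inference path), so the helper hands the
 * data back as int; the raw_data branch assumes 4-byte little-endian elements
 * on a matching target.
 */
static size_t onnx_example_tensor_numel(const Onnx__TensorProto *t)
{
    size_t i, numel = 1;
    for (i = 0; i < t->n_dims; i++)
    {
        numel *= (size_t)t->dims[i];
    }
    return numel; /* a tensor with no dims is a scalar: one element */
}

static const int *onnx_example_tensor_ints(const Onnx__TensorProto *t, size_t *out_n)
{
    if (t->has_raw_data && t->raw_data.len > 0)
    {
        /* Dividing by sizeof(int) is only valid when the stored element
         * width matches int (4 bytes here). */
        *out_n = t->raw_data.len / sizeof(int);
        return (const int *)t->raw_data.data;
    }
    *out_n = t->n_float_data;
    return t->float_data;
}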
typedef enum {
ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE__NOT_SET = 0,
ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_VALUE = 1,
ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_PARAM = 2,
} Onnx__TensorShapeProto__Dimension__ValueCase;
struct _Onnx__TensorShapeProto__Dimension
{
ProtobufCMessage base;
/*
* Standard denotation can optionally be used to denote tensor
* dimensions with standard semantic descriptions to ensure
* that operations are applied to the correct axis of a tensor.
* Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition
* for pre-defined dimension denotations.
*/
char *denotation;
Onnx__TensorShapeProto__Dimension__ValueCase value_case;
//#pragma anon_unions
union {
int64_t dim_value;
/*
* namespace Shape
*/
char *dim_param;
};
};
#define ONNX__TENSOR_SHAPE_PROTO__DIMENSION__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__tensor_shape_proto__dimension__descriptor) \
, NULL, ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE__NOT_SET, {0} }
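/*
 * Illustrative sketch (not part of the generated header): a dimension is a
 * tagged union, so value_case must be checked before reading dim_value or
 * dim_param. Symbolic or unset dimensions fall back to a caller-supplied
 * value in this hypothetical helper.
 */
static int64_t onnx_example_dim_or(const Onnx__TensorShapeProto__Dimension *dim,
                                   int64_t fallback)
{
    if (dim->value_case == ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_VALUE)
    {
        return dim->dim_value;
    }
    /* DIM_PARAM (a symbolic name) or VALUE__NOT_SET */
    return fallback;
}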
/*
* Defines a tensor shape. A dimension can be either an integer value
* or a symbolic variable. A symbolic variable represents an unknown
* dimension.
*/
struct _Onnx__TensorShapeProto
{
ProtobufCMessage base;
size_t n_dim;
Onnx__TensorShapeProto__Dimension **dim;
};
#define ONNX__TENSOR_SHAPE_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__tensor_shape_proto__descriptor) \
, 0,NULL }
struct _Onnx__TypeProto__Tensor
{
ProtobufCMessage base;
/*
* This field MUST NOT have the value of UNDEFINED
* This field MUST be present for this version of the IR.
*/
protobuf_c_boolean has_elem_type;
Onnx__TensorProto__DataType elem_type;
Onnx__TensorShapeProto *shape;
};
#define ONNX__TYPE_PROTO__TENSOR__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__type_proto__tensor__descriptor) \
, 0,0, NULL }
typedef enum {
ONNX__TYPE_PROTO__VALUE__NOT_SET = 0,
ONNX__TYPE_PROTO__VALUE_TENSOR_TYPE = 1,
} Onnx__TypeProto__ValueCase;
/*
* Types
* The standard ONNX data types.
*/
struct _Onnx__TypeProto
{
ProtobufCMessage base;
/*
* An optional denotation can be used to denote the whole
* type with a standard semantic description as to what is
* stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition
* for pre-defined type denotations.
*/
char *denotation;
Onnx__TypeProto__ValueCase value_case;
//#pragma anon_unions
union {
/*
* The type of a tensor.
*/
Onnx__TypeProto__Tensor *tensor_type;
};
};
#define ONNX__TYPE_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__type_proto__descriptor) \
, NULL, ONNX__TYPE_PROTO__VALUE__NOT_SET, {0} }
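/*
 * Illustrative sketch (not part of the generated header): extracting the
 * shape of a graph input or output from its ValueInfoProto. Only the
 * tensor_type member of the union is valid, and only when value_case says so.
 */
static const Onnx__TensorShapeProto *onnx_example_value_shape(const Onnx__ValueInfoProto *value)
{
    if (value == NULL || value->type == NULL)
    {
        return NULL;
    }
    if (value->type->value_case != ONNX__TYPE_PROTO__VALUE_TENSOR_TYPE ||
        value->type->tensor_type == NULL)
    {
        return NULL;
    }
    return value->type->tensor_type->shape; /* may still be NULL if unknown */
}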
/*
* Operator Sets
* OperatorSets are uniquely identified by a (domain, opset_version) pair.
*/
struct _Onnx__OperatorSetIdProto
{
ProtobufCMessage base;
/*
* The domain of the operator set being identified.
* The empty string ("") or absence of this field implies the operator
* set that is defined as part of the ONNX specification.
* This field MUST be present in this version of the IR when referring to any other operator set.
*/
char *domain;
/*
* The version of the operator set being identified.
* This field MUST be present in this version of the IR.
*/
protobuf_c_boolean has_version;
int64_t version;
};
#define ONNX__OPERATOR_SET_ID_PROTO__INIT \
{ PROTOBUF_C_MESSAGE_INIT (&onnx__operator_set_id_proto__descriptor) \
, NULL, 0,0 }
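/*
 * Illustrative sketch (not part of the generated header): reading the version
 * of the default ONNX operator set (empty or absent domain) from a model's
 * opset_import list, e.g. to reject models newer than the operators
 * implemented here. Returns -1 when no default-domain entry exists.
 */
static int64_t onnx_example_default_opset(const Onnx__ModelProto *model)
{
    size_t i;
    for (i = 0; i < model->n_opset_import; i++)
    {
        const Onnx__OperatorSetIdProto *id = model->opset_import[i];
        if ((id->domain == NULL || id->domain[0] == '\0') && id->has_version)
        {
            return id->version;
        }
    }
    return -1;
}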
/* Onnx__AttributeProto methods */
void onnx__attribute_proto__init
(Onnx__AttributeProto *message);
size_t onnx__attribute_proto__get_packed_size
(const Onnx__AttributeProto *message);
size_t onnx__attribute_proto__pack
(const Onnx__AttributeProto *message,
uint8_t *out);
size_t onnx__attribute_proto__pack_to_buffer
(const Onnx__AttributeProto *message,
ProtobufCBuffer *buffer);
Onnx__AttributeProto *
onnx__attribute_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__attribute_proto__free_unpacked
(Onnx__AttributeProto *message,
ProtobufCAllocator *allocator);
/* Onnx__ValueInfoProto methods */
void onnx__value_info_proto__init
(Onnx__ValueInfoProto *message);
size_t onnx__value_info_proto__get_packed_size
(const Onnx__ValueInfoProto *message);
size_t onnx__value_info_proto__pack
(const Onnx__ValueInfoProto *message,
uint8_t *out);
size_t onnx__value_info_proto__pack_to_buffer
(const Onnx__ValueInfoProto *message,
ProtobufCBuffer *buffer);
Onnx__ValueInfoProto *
onnx__value_info_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__value_info_proto__free_unpacked
(Onnx__ValueInfoProto *message,
ProtobufCAllocator *allocator);
/* Onnx__NodeProto methods */
void onnx__node_proto__init
(Onnx__NodeProto *message);
size_t onnx__node_proto__get_packed_size
(const Onnx__NodeProto *message);
size_t onnx__node_proto__pack
(const Onnx__NodeProto *message,
uint8_t *out);
size_t onnx__node_proto__pack_to_buffer
(const Onnx__NodeProto *message,
ProtobufCBuffer *buffer);
Onnx__NodeProto *
onnx__node_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__node_proto__free_unpacked
(Onnx__NodeProto *message,
ProtobufCAllocator *allocator);
/* Onnx__ModelProto methods */
void onnx__model_proto__init
(Onnx__ModelProto *message);
size_t onnx__model_proto__get_packed_size
(const Onnx__ModelProto *message);
size_t onnx__model_proto__pack
(const Onnx__ModelProto *message,
uint8_t *out);
size_t onnx__model_proto__pack_to_buffer
(const Onnx__ModelProto *message,
ProtobufCBuffer *buffer);
Onnx__ModelProto *
onnx__model_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__model_proto__free_unpacked
(Onnx__ModelProto *message,
ProtobufCAllocator *allocator);
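/*
 * Illustrative sketch (not part of the generated header): the usual pairing of
 * get_packed_size and pack for serializing a model back to bytes, plus the
 * unpack counterpart for loading. Assumes <stdlib.h> for malloc(); the caller
 * owns the returned buffer. Passing a NULL allocator to unpack selects the
 * default protobuf-c allocator.
 */
static uint8_t *onnx_example_pack_model(const Onnx__ModelProto *model, size_t *out_len)
{
    size_t len = onnx__model_proto__get_packed_size(model);
    uint8_t *buf = (uint8_t *)malloc(len);
    if (buf == NULL)
    {
        return NULL;
    }
    /* pack() writes exactly len bytes and returns the number written. */
    *out_len = onnx__model_proto__pack(model, buf);
    return buf;
}

static Onnx__ModelProto *onnx_example_unpack_model(const uint8_t *buf, size_t len)
{
    /* Free the result with onnx__model_proto__free_unpacked(model, NULL). */
    return onnx__model_proto__unpack(NULL, len, buf);
}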
/* Onnx__StringStringEntryProto methods */
void onnx__string_string_entry_proto__init
(Onnx__StringStringEntryProto *message);
size_t onnx__string_string_entry_proto__get_packed_size
(const Onnx__StringStringEntryProto *message);
size_t onnx__string_string_entry_proto__pack
(const Onnx__StringStringEntryProto *message,
uint8_t *out);
size_t onnx__string_string_entry_proto__pack_to_buffer
(const Onnx__StringStringEntryProto *message,
ProtobufCBuffer *buffer);
Onnx__StringStringEntryProto *
onnx__string_string_entry_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__string_string_entry_proto__free_unpacked
(Onnx__StringStringEntryProto *message,
ProtobufCAllocator *allocator);
/* Onnx__GraphProto methods */
void onnx__graph_proto__init
(Onnx__GraphProto *message);
size_t onnx__graph_proto__get_packed_size
(const Onnx__GraphProto *message);
size_t onnx__graph_proto__pack
(const Onnx__GraphProto *message,
uint8_t *out);
size_t onnx__graph_proto__pack_to_buffer
(const Onnx__GraphProto *message,
ProtobufCBuffer *buffer);
Onnx__GraphProto *
onnx__graph_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__graph_proto__free_unpacked
(Onnx__GraphProto *message,
ProtobufCAllocator *allocator);
/* Onnx__TensorProto__Segment methods */
void onnx__tensor_proto__segment__init
(Onnx__TensorProto__Segment *message);
/* Onnx__TensorProto methods */
void onnx__tensor_proto__init
(Onnx__TensorProto *message);
size_t onnx__tensor_proto__get_packed_size
(const Onnx__TensorProto *message);
size_t onnx__tensor_proto__pack
(const Onnx__TensorProto *message,
uint8_t *out);
size_t onnx__tensor_proto__pack_to_buffer
(const Onnx__TensorProto *message,
ProtobufCBuffer *buffer);
Onnx__TensorProto *
onnx__tensor_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__tensor_proto__free_unpacked
(Onnx__TensorProto *message,
ProtobufCAllocator *allocator);
/* Onnx__TensorShapeProto__Dimension methods */
void onnx__tensor_shape_proto__dimension__init
(Onnx__TensorShapeProto__Dimension *message);
/* Onnx__TensorShapeProto methods */
void onnx__tensor_shape_proto__init
(Onnx__TensorShapeProto *message);
size_t onnx__tensor_shape_proto__get_packed_size
(const Onnx__TensorShapeProto *message);
size_t onnx__tensor_shape_proto__pack
(const Onnx__TensorShapeProto *message,
uint8_t *out);
size_t onnx__tensor_shape_proto__pack_to_buffer
(const Onnx__TensorShapeProto *message,
ProtobufCBuffer *buffer);
Onnx__TensorShapeProto *
onnx__tensor_shape_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__tensor_shape_proto__free_unpacked
(Onnx__TensorShapeProto *message,
ProtobufCAllocator *allocator);
/* Onnx__TypeProto__Tensor methods */
void onnx__type_proto__tensor__init
(Onnx__TypeProto__Tensor *message);
/* Onnx__TypeProto methods */
void onnx__type_proto__init
(Onnx__TypeProto *message);
size_t onnx__type_proto__get_packed_size
(const Onnx__TypeProto *message);
size_t onnx__type_proto__pack
(const Onnx__TypeProto *message,
uint8_t *out);
size_t onnx__type_proto__pack_to_buffer
(const Onnx__TypeProto *message,
ProtobufCBuffer *buffer);
Onnx__TypeProto *
onnx__type_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__type_proto__free_unpacked
(Onnx__TypeProto *message,
ProtobufCAllocator *allocator);
/* Onnx__OperatorSetIdProto methods */
void onnx__operator_set_id_proto__init
(Onnx__OperatorSetIdProto *message);
size_t onnx__operator_set_id_proto__get_packed_size
(const Onnx__OperatorSetIdProto *message);
size_t onnx__operator_set_id_proto__pack
(const Onnx__OperatorSetIdProto *message,
uint8_t *out);
size_t onnx__operator_set_id_proto__pack_to_buffer
(const Onnx__OperatorSetIdProto *message,
ProtobufCBuffer *buffer);
Onnx__OperatorSetIdProto *
onnx__operator_set_id_proto__unpack
(ProtobufCAllocator *allocator,
size_t len,
const uint8_t *data);
void onnx__operator_set_id_proto__free_unpacked
(Onnx__OperatorSetIdProto *message,
ProtobufCAllocator *allocator);
/* --- per-message closures --- */
typedef void (*Onnx__AttributeProto_Closure)
(const Onnx__AttributeProto *message,
void *closure_data);
typedef void (*Onnx__ValueInfoProto_Closure)
(const Onnx__ValueInfoProto *message,
void *closure_data);
typedef void (*Onnx__NodeProto_Closure)
(const Onnx__NodeProto *message,
void *closure_data);
typedef void (*Onnx__ModelProto_Closure)
(const Onnx__ModelProto *message,
void *closure_data);
typedef void (*Onnx__StringStringEntryProto_Closure)
(const Onnx__StringStringEntryProto *message,
void *closure_data);
typedef void (*Onnx__GraphProto_Closure)
(const Onnx__GraphProto *message,
void *closure_data);
typedef void (*Onnx__TensorProto__Segment_Closure)
(const Onnx__TensorProto__Segment *message,
void *closure_data);
typedef void (*Onnx__TensorProto_Closure)
(const Onnx__TensorProto *message,
void *closure_data);
typedef void (*Onnx__TensorShapeProto__Dimension_Closure)
(const Onnx__TensorShapeProto__Dimension *message,
void *closure_data);
typedef void (*Onnx__TensorShapeProto_Closure)
(const Onnx__TensorShapeProto *message,
void *closure_data);
typedef void (*Onnx__TypeProto__Tensor_Closure)
(const Onnx__TypeProto__Tensor *message,
void *closure_data);
typedef void (*Onnx__TypeProto_Closure)
(const Onnx__TypeProto *message,
void *closure_data);
typedef void (*Onnx__OperatorSetIdProto_Closure)
(const Onnx__OperatorSetIdProto *message,
void *closure_data);
/* --- services --- */
/* --- descriptors --- */
extern const ProtobufCEnumDescriptor onnx__version__descriptor;
extern const ProtobufCMessageDescriptor onnx__attribute_proto__descriptor;
extern const ProtobufCEnumDescriptor onnx__attribute_proto__attribute_type__descriptor;
extern const ProtobufCMessageDescriptor onnx__value_info_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__node_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__model_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__string_string_entry_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__graph_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__tensor_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__tensor_proto__segment__descriptor;
extern const ProtobufCEnumDescriptor onnx__tensor_proto__data_type__descriptor;
extern const ProtobufCMessageDescriptor onnx__tensor_shape_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__tensor_shape_proto__dimension__descriptor;
extern const ProtobufCMessageDescriptor onnx__type_proto__descriptor;
extern const ProtobufCMessageDescriptor onnx__type_proto__tensor__descriptor;
extern const ProtobufCMessageDescriptor onnx__operator_set_id_proto__descriptor;
PROTOBUF_C__END_DECLS
#endif /* PROTOBUF_C_src_2fonnx_2eproto__INCLUDED */