add onnx pack

This commit is contained in:
dkk0918
2021-09-06 21:50:44 +08:00
parent 427b13d14a
commit 6fb59b9313
27 changed files with 9215 additions and 0 deletions

View File

@@ -0,0 +1,35 @@
#include "onnx.h"
/* Element-wise addition: output[i] = input[i] + bias[i] for i in [0, dim_vec). */
void add(const int *input, // pointer to vector
         const int *bias, // pointer to bias vector
         const uint16_t dim_vec, // length of the vector
         int *output)
{
    uint16_t idx = 0;
    while (idx < dim_vec)
    {
        output[idx] = input[idx] + bias[idx];
        idx++;
    }
}
/*
 * Add (bias) layer: output = input + B, where B is the initializer named by
 * the node's second input.  Output shape equals input shape.
 * Returns a malloc'd buffer owned by the caller, or NULL on error.
 */
int* add_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
    if(node == NULL)
    {
        // layer not found
        return NULL;
    }
    const char* bias = node->input[1];
    int* B = onnx_graph_get_weights_by_name(graph, bias);
    int64_t* shapeB = onnx_graph_get_dims_by_name(graph, bias);
    if(B == NULL || shapeB == NULL)
    {
        return NULL;
    }
    int* output = (int*) malloc(sizeof(int)*shapeB[0]);
    if(output == NULL)
    {
        // No memory
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*shapeB[0])), which
    // cleared only sizeof(size_t) bytes instead of the whole buffer.
    memset(output, 0, sizeof(int)*shapeB[0]);
    add(input, B, shapeB[0], output);
    // BUG FIX: arguments were swapped; the output shape must be copied from
    // the input shape, not the (uninitialized) output shape into the input.
    memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);
    return output;
}

View File

@@ -0,0 +1,113 @@
#include "onnx.h"
/*
 * Direct 2-D convolution over an interleaved-channel (WHC) image.
 * Input layout:  input[(row * dim_im_in_x + col) * ch_im_in + c]
 * Weight layout: weight[((out_ch * dim_kernel_y + ky) * dim_kernel_x + kx) * ch_im_in + in_ch]
 * Output layout: output[out_ch + (row * dim_im_out_x + col) * ch_im_out]
 * Out-of-image taps (from padding) are skipped, i.e. treated as zero.
 */
void conv2D(const int *input, // input image
const uint16_t dim_im_in_x, // input image dimention x
const uint16_t dim_im_in_y, // input image dimention y
const uint16_t ch_im_in, // number of input image channels
const int *weight, // kernel weights
const uint16_t ch_im_out, // number of filters, i.e., output image channels
const uint16_t dim_kernel_x, // filter kernel size x
const uint16_t dim_kernel_y, // filter kernel size y
const uint16_t padding_x, // padding sizes x
const uint16_t padding_y, // padding sizes y
const uint16_t stride_x, // stride x
const uint16_t stride_y, // stride y
const int *bias, // bias
int *output, // output image
const uint16_t dim_im_out_x, // output image dimension x
const uint16_t dim_im_out_y // output image dimension y
)
{
    int i, j, k, l, m, n;
    // BUG FIX: was `int conv_out = 0.0f;` — a float literal initializing an
    // int (harmless but misleading left-over from the float version).
    int conv_out = 0;
    int in_row, in_col;
    // For each filter
    for (i = 0; i < ch_im_out; i++)
    {
        // For each output pixel
        for (j = 0; j < dim_im_out_y; j++)
        {
            for (k = 0; k < dim_im_out_x; k++)
            {
                conv_out = bias[i];
                // For each kernel tap
                for (m = 0; m < dim_kernel_y; m++)
                {
                    for (n = 0; n < dim_kernel_x; n++)
                    {
                        in_row = stride_y * j + m - padding_y;
                        in_col = stride_x * k + n - padding_x;
                        // Skip taps that fall outside the image (zero padding)
                        if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x)
                        {
                            // For each input channel
                            for (l = 0; l < ch_im_in; l++)
                            {
                                conv_out += input[(in_row * dim_im_in_x + in_col) * ch_im_in + l] *
                                            weight[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_x + n) * ch_im_in +
                                                   l];
                            }
                        }
                    }
                }
                output[i + (j * dim_im_out_x + k) * ch_im_out] = conv_out;
            }
        }
    }
}
/*
 * Conv node: reads the weight (input[1]) and bias (input[2]) initializers,
 * transposes the kernel from NCWH to NWHC, and runs a same-size 2-D
 * convolution.  Returns a malloc'd NWHC activation buffer owned by the
 * caller, or NULL on error.
 */
int* conv2D_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
    if(node == NULL)
    {
        // layer not found
        return NULL;
    }
    const char* weight = node->input[1];
    const char* bias = node->input[2];
    // Get weight shape
    int64_t* shapeW = onnx_graph_get_dims_by_name(graph, weight);
    if(shapeW == NULL)
    {
        return NULL;
    }
    int64_t dimW = onnx_graph_get_dim_by_name(graph, weight);
    if(dimW < 0)
    {
        return NULL;
    }
    // Kernel layout conversion: NCWH --> NWHC
    int64_t permW_t[] = { 0, 2, 3, 1};
    int* W = onnx_graph_get_weights_by_name(graph, weight);
    if(W == NULL)
    {
        return NULL;
    }
    int* W_t = transpose(W, shapeW, dimW, permW_t);
    if(W_t == NULL)
    {
        // BUG FIX: transpose() failure (allocation) was not checked before use.
        return NULL;
    }
    // Get bias
    int* B = onnx_graph_get_weights_by_name(graph, bias);
    if(B == NULL)
    {
        // BUG FIX: W_t was leaked on this path.
        free(W_t);
        return NULL;
    }
    int* output = (int*) malloc(sizeof(int)*shapeW[0]*shapeInput[W_INDEX]*shapeInput[H_INDEX]);
    if(output == NULL)
    {
        // BUG FIX: allocation failure was not checked.
        free(W_t);
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*...)), which cleared
    // only sizeof(size_t) bytes of the buffer.
    memset(output, 0, sizeof(int)*shapeW[0]*shapeInput[W_INDEX]*shapeInput[H_INDEX]);
    // NOTE(review): padding and stride are hard-coded to 1 (same-size 3x3
    // convolution); the node's "pads"/"strides" attributes are not parsed.
    conv2D(input, shapeInput[W_INDEX], shapeInput[H_INDEX], shapeW[1], W_t, shapeW[0], shapeW[2], shapeW[3], 1, 1, 1, 1, B, output, shapeInput[W_INDEX], shapeInput[H_INDEX]);
    shapeOutput[W_INDEX] = shapeInput[W_INDEX];
    shapeOutput[H_INDEX] = shapeInput[H_INDEX];
    shapeOutput[C_INDEX] = shapeW[0];
    free(W_t);
    return output;
}

View File

@@ -0,0 +1,19 @@
#include "onnx.h"
/*
 * Fully-connected layer: output[r] = bias[r] + dot(input, weight row r).
 * weight is row-major, num_of_rows x dim_vec.
 */
void dense(const int *input, // pointer to vector
           const int *weight, // pointer to matrix
           const uint16_t dim_vec, // length of the vector
           const uint16_t num_of_rows, // numCol of A
           const int *bias,
           int *output) // output operand
{
    for (uint16_t row = 0; row < num_of_rows; row++)
    {
        const int *w_row = &weight[row * dim_vec];
        int acc = bias[row];
        for (uint16_t col = 0; col < dim_vec; col++)
        {
            acc += input[col] * w_row[col];
        }
        output[row] = acc;
    }
}

View File

@@ -0,0 +1,25 @@
#include "onnx.h"
/*
 * Debug helper: print an int tensor as a flat list, inserting a newline at
 * the end of every trailing-dimension slice.
 */
void onnx_tensor_info(const int* A, int64_t* shape, int64_t dim)
{
    // Total element count = product of all dims.
    int elem = 1;
    for(int i = 0; i < dim; i++)
    {
        elem = elem * shape[i];
    }
    printf("Array size: %d\n", elem);
    for(int i = 0; i < elem; i++)
    {
        // BUG FIX: elements are int but were printed with "%f" — a
        // format/argument mismatch (undefined behaviour).
        printf( "%d ", A[i] );
        int split = 1;
        for(int j = dim-1; j > 0; j--)
        {
            split = split * shape[j];
            if( (i+1) % split == 0)
            {
                printf("\n");
            }
        }
    }
}

View File

@@ -0,0 +1,63 @@
#include "onnx.h"
/*
 * Vector-matrix product: output[r] = dot(input, weight row r), with
 * weight stored row-major as num_of_rows x dim_vec.
 */
void matmul(const int *input, // pointer to vector
            const int *weight, // pointer to matrix
            const uint16_t dim_vec, // length of the vector
            const uint16_t num_of_rows, // numCol of A
            int *output)
{
    for (uint16_t row = 0; row < num_of_rows; row++)
    {
        const int *w_row = &weight[row * dim_vec];
        int acc = 0;
        for (uint16_t col = 0; col < dim_vec; col++)
        {
            acc += input[col] * w_row[col];
        }
        output[row] = acc;
    }
}
/*
 * MatMul node: output = input x W, with W (input[1]) transposed so each
 * output element is a contiguous dot product.  Output shape is
 * [shapeInput[0], shapeW[1]].  Returns a malloc'd buffer owned by the
 * caller, or NULL on error.
 */
int* matmul_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
    if(node == NULL)
    {
        // BUG FIX: node lookup result was dereferenced unchecked.
        return NULL;
    }
    const char* weight = node->input[1];
    int64_t* shapeW = onnx_graph_get_dims_by_name(graph, weight);
    if(shapeW == NULL)
    {
        return NULL;
    }
    int64_t dimW = onnx_graph_get_dim_by_name(graph, weight);
    if(dimW < 0)
    {
        return NULL;
    }
    // W is stored dim_vec x num_rows; transpose to num_rows x dim_vec.
    int64_t permW_t[] = {1, 0};
    int* W = onnx_graph_get_weights_by_name(graph, weight);
    if(W == NULL)
    {
        return NULL;
    }
    int* W_t = transpose(W, shapeW, dimW, permW_t);
    if(W_t == NULL)
    {
        // BUG FIX: transpose() failure was not checked before use.
        return NULL;
    }
    int* output = (int*) malloc(sizeof(int)*shapeW[1]);
    if(output == NULL)
    {
        // No memory.  BUG FIX: W_t was leaked on this path.
        free(W_t);
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*shapeW[1])), which
    // cleared only sizeof(size_t) bytes.
    memset(output, 0, sizeof(int)*shapeW[1]);
    matmul(input, W_t, shapeW[0], shapeW[1], output);
    shapeOutput[0] = shapeInput[0];
    shapeOutput[1] = shapeW[1];
    free(W_t);
    return output;
}

View File

@@ -0,0 +1,96 @@
#include "onnx.h"
/*
 * 2-D max pooling over an interleaved-channel (WHC) image.
 * Window taps outside the image (from padding) are skipped.
 */
void maxpool(const int *input,
const uint16_t dim_im_in_x, // input image dimension x or W
const uint16_t dim_im_in_y, // input image dimension y or H
const uint16_t ch_im_in, // number of input image channels
const uint16_t dim_kernel_x, // window kernel size
const uint16_t dim_kernel_y, // window kernel size
const uint16_t padding_x, // padding sizes
const uint16_t padding_y, // padding sizes
const uint16_t stride_x, // stride
const uint16_t stride_y, // stride
const uint16_t dim_im_out_x, // output image dimension x or W
const uint16_t dim_im_out_y, // output image dimension y or H
int *output)
{
    int16_t ch, oy, ox, ky, kx;
    for (ch = 0; ch < ch_im_in; ch++)
    {
        for (oy = 0; oy < dim_im_out_y; oy++)
        {
            for (ox = 0; ox < dim_im_out_x; ox++)
            {
                // Window maximum is seeded with 0, matching the original
                // (inputs assumed non-negative, e.g. post-ReLU).
                int best = 0;
                int16_t y0 = oy * stride_y - padding_y;
                int16_t x0 = ox * stride_x - padding_x;
                for (ky = y0; ky < y0 + dim_kernel_y; ky++)
                {
                    if (ky < 0 || ky >= dim_im_in_y)
                    {
                        continue;   // tap above/below the image
                    }
                    for (kx = x0; kx < x0 + dim_kernel_x; kx++)
                    {
                        if (kx < 0 || kx >= dim_im_in_x)
                        {
                            continue;   // tap left/right of the image
                        }
                        int v = input[ch + ch_im_in * (kx + ky * dim_im_in_x)];
                        if (v > best)
                        {
                            best = v;
                        }
                    }
                }
                output[ch + ch_im_in * (ox + oy * dim_im_out_x)] = best;
            }
        }
    }
}
/*
 * MaxPool node: reads kernel_shape/strides attributes (defaults 1) and
 * pools the NWHC activation.  Returns a malloc'd buffer owned by the
 * caller, or NULL on error.
 */
int* maxpool_layer(Onnx__GraphProto* graph, int* input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
    if(node == NULL)
    {
        // layer not found
        return NULL;
    }
    // Attribute defaults when absent from the node.
    uint16_t kernel_x = 1;
    uint16_t kernel_y = 1;
    // NOTE(review): the ONNX "pads" attribute is never parsed; padding is
    // always 0 here.
    uint16_t padding_x = 0;
    uint16_t padding_y = 0;
    uint16_t stride_x = 1;
    uint16_t stride_y = 1;
    // BUG FIX (minor): n_attribute is size_t; use a size_t index to avoid
    // a signed/unsigned comparison.
    for(size_t i = 0; i < node->n_attribute; i++)
    {
        if( strcmp(node->attribute[i]->name, "kernel_shape") == 0 )
        {
            kernel_x = node->attribute[i]->ints[0];
            kernel_y = node->attribute[i]->ints[1];
        }
        if( strcmp(node->attribute[i]->name, "strides") == 0 )
        {
            stride_x = node->attribute[i]->ints[0];
            stride_y = node->attribute[i]->ints[1];
        }
    }
    // Standard pooling output size formula.
    uint16_t out_x = (shapeInput[W_INDEX] - kernel_x + 2 * padding_x) / stride_x + 1;
    uint16_t out_y = (shapeInput[H_INDEX] - kernel_y + 2 * padding_y) / stride_y + 1;
    int* output = (int*) malloc(sizeof(int)*out_x*out_y*shapeInput[C_INDEX]);
    if(output == NULL)
    {
        // No memory
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*...)), which cleared
    // only sizeof(size_t) bytes.
    memset(output, 0, sizeof(int)*out_x*out_y*shapeInput[C_INDEX]);
    maxpool(input, shapeInput[W_INDEX], shapeInput[H_INDEX], shapeInput[C_INDEX], kernel_x, kernel_y, padding_x, padding_y, stride_x, stride_y, out_x, out_y, output);
    shapeOutput[W_INDEX] = out_x;
    shapeOutput[H_INDEX] = out_y;
    shapeOutput[C_INDEX] = shapeInput[C_INDEX];
    return output;
}

View File

@@ -0,0 +1,84 @@
#include <inttypes.h>
#include "onnx.h"
/*
 * Drive one forward pass: walk the graph from the node that consumes the
 * graph's first input, dispatch each op to its layer implementation, and
 * thread the activation buffer and its 3-element shape between layers.
 *
 * Ownership: after each executed layer the previous `input` buffer is
 * freed and replaced by the layer's freshly malloc'd output; the final
 * buffer is returned to the caller (who frees it).  `shapeInput` is
 * updated in place after every layer.
 */
int* onnx_model_run(Onnx__ModelProto* model, int* input, int64_t* shapeInput)
{
// Scratch shape filled by each layer; -1 marks "not yet produced".
int64_t* shapeOutput = (int64_t*) malloc(sizeof(int64_t)*3);
shapeOutput[0] = -1; shapeOutput[1] = -1; shapeOutput[2] = -1;
Onnx__NodeProto* node = onnx_graph_get_node_by_input(model->graph, model->graph->input[0]->name);
int i = 0;
int* output;
while(node != NULL)
{
printf("[%2d] %-10s %-20s ", i++, node->op_type, node->name);
if(strcmp(node->op_type, "Conv") == 0)
{
output = conv2D_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Relu") == 0)
{
output = relu_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "MaxPool") == 0)
{
output = maxpool_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Softmax") == 0)
{
output = softmax_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "MatMul") == 0)
{
output = matmul_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
else if(strcmp(node->op_type, "Add") == 0)
{
output = add_layer(model->graph, input, shapeInput, shapeOutput, node->name);
}
// Identity and Transpose are skipped as no-ops: advance to the next node
// without swapping buffers or shapes.
else if(strcmp(node->op_type, "Identity") == 0)
{
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
printf("\n");
continue;
}
else if(strcmp(node->op_type, "Transpose") == 0)
{
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
printf("\n");
continue;
}
// Reshape is a flatten: fold all three dims into dim 1 ([1, N, 1]); the
// data buffer is already flat, so it is reused untouched.
else if(strcmp(node->op_type, "Reshape") == 0)
{
shapeOutput[1] = shapeOutput[0] * shapeOutput[1] * shapeOutput[2];
shapeOutput[2] = 1;
shapeOutput[0] = 1;
printf("[%2" PRId64 ", %2" PRId64 ", %2" PRId64 "] --> [%2" PRId64 ", %2" PRId64 ", %2" PRId64 "]\n", shapeInput[0], shapeInput[1], shapeInput[2], shapeOutput[0], shapeOutput[1], shapeOutput[2]);
// free(input);
// input = output;
memcpy(shapeInput, shapeOutput, sizeof(int64_t)*3);
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
continue;
}
else
{
// NOTE(review): on an unsupported op, `output` is stale (or still
// uninitialized on the very first iteration) yet the code below frees
// `input` and adopts `output` anyway — likely use of garbage; confirm.
printf("Unsupported operand: %s\n", node->op_type);
}
printf("[%2" PRId64 ", %2" PRId64 ", %2" PRId64 "] --> [%2" PRId64 ", %2" PRId64 ", %2" PRId64 "]\n", shapeInput[0], shapeInput[1], shapeInput[2], shapeOutput[0], shapeOutput[1], shapeOutput[2]);
// Hand the layer's output buffer to the next iteration.
free(input);
input = output;
memcpy(shapeInput, shapeOutput, sizeof(int64_t)*3);
node = onnx_graph_get_node_by_input(model->graph, node->output[0]);
}
output = input;
free(shapeOutput);
return output;
}

View File

@@ -0,0 +1,284 @@
#include "onnx-parser.h"
// Human-readable names for Onnx__TensorProto element types, indexed by the
// TensorProto.DataType enum ordinal (0 = UNDEFINED .. 15 = COMPLEX128).
const char* onnx_tensor_proto_data_type[] = {
"Undefined",
"FLOAT",
"UINT8",
"INT8",
"UINT16",
"INT16",
"INT32",
"INT64",
"STRING",
"BOOL",
"FLOAT16",
"DOUBLE",
"UINT32",
"UINT64",
"COMPLEX64",
"COMPLEX128"
};
/*
Onnx__ModelProto* onnx_load_model(const char* onnx_file_name)
{
unsigned char* buffer;
FILE *fp;
// Get File Size
fp = fopen(onnx_file_name,"rb");
fseek(fp, 0L, SEEK_END);
int sz = ftell(fp);
fseek(fp, 0L, SEEK_SET);
// printf("File size %s is %d\n", onnx_file_name, sz);
// Read File
buffer = (unsigned char*) malloc(sizeof(unsigned char) * sz);
if(buffer == NULL)
{
printf("Failed to malloc %d bytes memory for %s\n", sz, onnx_file_name);
return NULL;
}
fread(buffer, sz, 1, fp);
Onnx__ModelProto* model = onnx__model_proto__unpack(NULL, sz, buffer);
free(buffer);
fclose(fp);
return model;
}
*/
/* Print model-level metadata (IR version, producer, domain) to stdout. */
void onnx_model_info(Onnx__ModelProto* model)
{
    printf("---- Model info ----\n");
    // NOTE(review): ir_version is int64_t; %ld only matches on LP64 targets.
    printf("IR Version is %ld\n", model->ir_version);
    // BUG FIX: "Produceer" typos, and the domain line was mislabelled
    // "Produceer version".
    printf("Producer name is %s\n", model->producer_name);
    printf("Producer version is %s\n", model->producer_version);
    printf("Domain is %s\n", model->domain);
}
/* Print the full (unsorted) graph: inputs, outputs, then every node. */
void onnx_graph_info(Onnx__GraphProto* graph)
{
    printf("---- Graph Info ----\n");
    // Input
    printf("---- Graph Input Info ----\n");
    // BUG FIX: n_input/n_output/n_node are size_t in protobuf-c generated
    // structs; %ld is a format mismatch off LP64 — use %zu and size_t indices.
    printf("Graph inputs number: %zu\n", graph->n_input);
    for(size_t i = 0; i < graph->n_input; i++)
    {
        onnx_graph_input_info(graph->input[i]);
    }
    // Output
    printf("---- Graph Output Info ----\n");
    printf("Graph outputs number: %zu\n", graph->n_output);
    for(size_t i = 0; i < graph->n_output; i++)
    {
        onnx_graph_output_info(graph->output[i]);
    }
    // Nodes
    printf("---- Graph Node Info ----\n");
    printf("Graph nodes number: %zu\n", graph->n_node);
    for(size_t i = 0; i < graph->n_node; i++)
    {
        onnx_graph_node_info(graph->node[i]);
    }
}
/*
 * Print graph metadata with nodes in execution order, found by chasing
 * each node's first output to its consumer starting from the graph input.
 * NOTE(review): nodes not on that single producer/consumer chain are not
 * printed; assumes a linear graph.
 */
void onnx_graph_info_sorted(Onnx__GraphProto* graph)
{
printf("---- Graph Info ----\n");
// Input
printf("---- Graph Input Info ----\n");
// NOTE(review): n_input/n_output are size_t in protobuf-c; %ld assumes LP64.
printf("Graph inputs number: %ld\n", graph->n_input);
for(int i = 0; i < graph->n_input; i++)
{
onnx_graph_input_info(graph->input[i]);
}
// Output
printf("---- Graph Output Info ----\n");
printf("Graph outputs number: %ld\n", graph->n_output);
for(int i = 0; i < graph->n_output; i++)
{
onnx_graph_output_info(graph->output[i]);
}
// Nodes
printf("---- Graph Node Info ----\n");
printf("Graph nodes number: %ld\n", graph->n_node);
// Follow the chain: node whose input is the previous node's first output.
Onnx__NodeProto* node = onnx_graph_get_node_by_input(graph, graph->input[0]->name);
while(node != NULL)
{
onnx_graph_node_info(node);
node = onnx_graph_get_node_by_input(graph, node->output[0]);
}
}
/*
 * Print one graph input's name, element type and shape (dims joined by "x").
 * NOTE(review): assumes the value is a tensor (type->tensor_type non-NULL).
 */
void onnx_graph_input_info(Onnx__ValueInfoProto* input)
{
printf("Input name %s\n", input->name);
Onnx__TypeProto* type = input->type;
Onnx__TypeProto__Tensor* tensor_type = type->tensor_type;
Onnx__TensorShapeProto* shape = tensor_type->shape;
// elem_type indexes the onnx_tensor_proto_data_type[] name table.
printf("Input type %s\n", onnx_tensor_proto_data_type[tensor_type->elem_type]);
printf("Input dimension %ld\n", shape->n_dim);
for(int i = 0; i < shape->n_dim; i++)
{
onnx_graph_value_tensor_shape_dimension_info(shape->dim[i]);
if( i != shape->n_dim - 1)
{
printf(" x ");
}
}
printf("\n");
}
/*
 * Print one shape dimension: a number when it is concrete, the symbolic
 * parameter name when it is named, and "?" when it is unset/unknown.
 */
void onnx_graph_value_tensor_shape_dimension_info(Onnx__TensorShapeProto__Dimension* dim)
{
    switch (dim->value_case)
    {
    case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_VALUE:
        printf("%ld", dim->dim_value);
        break;
    case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE_DIM_PARAM:
        printf("%s", dim->dim_param);
        break;
    case ONNX__TENSOR_SHAPE_PROTO__DIMENSION__VALUE__NOT_SET:
    default:
        printf("?");
        break;
    }
}
/*
 * Print one graph output's name, element type and shape (dims joined by "x").
 * NOTE(review): assumes the value is a tensor (type->tensor_type non-NULL).
 */
void onnx_graph_output_info(Onnx__ValueInfoProto* output)
{
printf("Output name %s\n", output->name);
Onnx__TypeProto* type = output->type;
Onnx__TypeProto__Tensor* tensor_type = type->tensor_type;
Onnx__TensorShapeProto* shape = tensor_type->shape;
// elem_type indexes the onnx_tensor_proto_data_type[] name table.
printf("Output type %s\n", onnx_tensor_proto_data_type[tensor_type->elem_type]);
printf("Output dimension %ld\n", shape->n_dim);
for(int i = 0; i < shape->n_dim; i++)
{
onnx_graph_value_tensor_shape_dimension_info(shape->dim[i]);
if( i != shape->n_dim - 1)
{
printf(" x ");
}
}
printf("\n");
}
/*
 * Debug dump of one initializer (weight tensor): its dims, then its
 * float_data payload.
 */
void onnx_graph_initializer_info(Onnx__TensorProto* initializer)
{
printf("%s: [", initializer->name);
for(int i = 0; i < initializer->n_dims; i++)
{
printf("%ld, ", initializer->dims[i]);
}
printf("]\n");
printf("%s: [", initializer->name);
for(int i = 0; i < initializer->n_float_data; i++)
{
printf("%f, ", initializer->float_data[i]);
}
printf("]\n");
}
/*
 * Find a node by its exact name; returns NULL when absent.
 */
Onnx__NodeProto* onnx_graph_get_node_by_name(Onnx__GraphProto* graph, const char* node_name)
{
    // BUG FIX: the original wrapped the name comparison in a loop over
    // node->n_input, so a node with zero inputs could never be found, and
    // each match was attempted n_input times.
    for(int i = 0; i < graph->n_node; i++)
    {
        Onnx__NodeProto* node = graph->node[i];
        if( strcmp(node->name, node_name) == 0)
        {
            return node;
        }
    }
    return NULL;
}
/*
 * Find the first node that consumes `input_name` as one of its inputs;
 * returns NULL when no node does.
 */
Onnx__NodeProto* onnx_graph_get_node_by_input(Onnx__GraphProto* graph, const char* input_name)
{
    int n, in;
    for (n = 0; n < graph->n_node; n++)
    {
        Onnx__NodeProto* candidate = graph->node[n];
        for (in = 0; in < candidate->n_input; in++)
        {
            if (strcmp(candidate->input[in], input_name) != 0)
            {
                continue;
            }
            return candidate;
        }
    }
    return NULL;
}
/*
 * Look up an initializer's payload by name; returns NULL when absent.
 * NOTE(review): float_data is returned through an int* — this relies on the
 * project's fixed-point regeneration of the protobuf types; confirm that
 * onnx.pb-c.h actually declares float_data as a 32-bit int array here.
 */
int* onnx_graph_get_weights_by_name(Onnx__GraphProto* graph, const char* node_name)
{
    Onnx__TensorProto** initializer = graph->initializer;
    for(int i = 0; i < graph->n_initializer; i++)
    {
        // Fix: use the local alias (it was declared but never used).
        if( strcmp(initializer[i]->name, node_name) == 0)
        {
            return initializer[i]->float_data;
        }
    }
    return NULL;
}
/*
 * Look up an initializer's dims array by name; returns NULL when absent.
 * NOTE(review): protobuf-c declares dims as int64_t*; the long* return type
 * only matches on LP64 targets — confirm on the target toolchain.
 */
long* onnx_graph_get_dims_by_name(Onnx__GraphProto* graph, const char* node_name)
{
    Onnx__TensorProto** initializer = graph->initializer;
    for(int i = 0; i < graph->n_initializer; i++)
    {
        // Fix: use the local alias (it was declared but never used).
        if( strcmp(initializer[i]->name, node_name) == 0)
        {
            return initializer[i]->dims;
        }
    }
    return NULL;
}
/*
 * Look up an initializer's rank (number of dims) by name; returns -1 when
 * the initializer does not exist.
 */
long onnx_graph_get_dim_by_name(Onnx__GraphProto* graph, const char* node_name)
{
    Onnx__TensorProto** initializer = graph->initializer;
    for(int i = 0; i < graph->n_initializer; i++)
    {
        // Fix: use the local alias (it was declared but never used).
        if( strcmp(initializer[i]->name, node_name) == 0)
        {
            return initializer[i]->n_dims;
        }
    }
    return -1;
}
// One-line node summary: op type, first input -> first output [node name].
// NOTE(review): assumes the node has at least one input and one output.
void onnx_graph_node_info(Onnx__NodeProto* node)
{
printf("%-12s: %-30s -> %-30s [%s]\n", node->op_type, node->input[0], node->output[0], node->name);
}

View File

@@ -0,0 +1,27 @@
/*
 * onnx-parser.h — helpers for inspecting and querying a decoded
 * Onnx__ModelProto / Onnx__GraphProto (protobuf-c generated types).
 */
#ifndef __ONNX_PARSER_H__
#define __ONNX_PARSER_H__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "onnx.pb-c.h"
/* Decode a .onnx protobuf file into a model; caller frees the result. */
Onnx__ModelProto* onnx_load_model(const char* onnx_file_name);
/* Pretty-printers (stdout) for model / graph / value / node metadata. */
void onnx_model_info(Onnx__ModelProto* model);
void onnx_graph_info(Onnx__GraphProto* graph);
void onnx_graph_info_sorted(Onnx__GraphProto* graph);
void onnx_graph_input_info(Onnx__ValueInfoProto* input);
void onnx_graph_output_info(Onnx__ValueInfoProto* output);
void onnx_graph_node_info(Onnx__NodeProto* node);
void onnx_graph_initializer_info(Onnx__TensorProto* initializer);
/* Lookups; all return NULL (or -1) when the name is not found. */
Onnx__NodeProto* onnx_graph_get_node_by_name(Onnx__GraphProto* graph, const char* node_name);
Onnx__NodeProto* onnx_graph_get_node_by_input(Onnx__GraphProto* graph, const char* input_name);
long* onnx_graph_get_dims_by_name(Onnx__GraphProto* graph, const char* node_name);
long onnx_graph_get_dim_by_name(Onnx__GraphProto* graph, const char* node_name);
int* onnx_graph_get_weights_by_name(Onnx__GraphProto* graph, const char* node_name);
void onnx_graph_value_tensor_shape_dimension_info(Onnx__TensorShapeProto__Dimension* dim);
#endif //__ONNX_PARSER_H__

View File

@@ -0,0 +1,95 @@
/*
 * onnx.h — public API of the integer ONNX inference pack: the model runner,
 * per-op "layer" wrappers (which allocate and return their output buffer),
 * and the raw compute kernels.
 */
#ifndef __ONNX_H__
#define __ONNX_H__
#include <stdio.h>
#include <stdint.h>
#include <string.h>
//#include <int.h>
#include <math.h>
#include <onnx-parser.h>
// Activation memory layout selector.  W_INDEX/H_INDEX/C_INDEX index the
// 3-element int64_t shape arrays passed between layers.
#define ONNX_USE_NWHC
#ifdef ONNX_USE_NWHC
// NWHC
#define W_INDEX 0
#define H_INDEX 1
#define C_INDEX 2
#else
// NCWH
#define C_INDEX 0
#define W_INDEX 1
#define H_INDEX 2
#endif
// Model
void onnx_tensor_info(const int* A, int64_t* shape, int64_t dim);
int* onnx_model_run(Onnx__ModelProto* model, int* input, int64_t* shapeInput);
// Layers: each returns a malloc'd output buffer (caller frees) or NULL on
// error, and writes the produced shape into shapeOutput.
int* conv2D_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* relu_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* maxpool_layer(Onnx__GraphProto* graph, int* input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* matmul_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* add_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
int* softmax_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name);
// Operators (raw kernels; caller provides all buffers)
int* transpose(const int* A, int64_t* shape, int64_t dim, int64_t* perm);
void conv2D(const int *input, // input image
const uint16_t dim_im_in_x, // input image dimention x
const uint16_t dim_im_in_y, // input image dimention y
const uint16_t ch_im_in, // number of input image channels
const int *weight, // kernel weights
const uint16_t ch_im_out, // number of filters, i.e., output image channels
const uint16_t dim_kernel_x, // filter kernel size x
const uint16_t dim_kernel_y, // filter kernel size y
const uint16_t padding_x, // padding sizes x
const uint16_t padding_y, // padding sizes y
const uint16_t stride_x, // stride x
const uint16_t stride_y, // stride y
const int *bias, // bias
int *output, // output image
const uint16_t dim_im_out_x, // output image dimension x
const uint16_t dim_im_out_y // output image dimension y
);
void relu(const int *input, uint32_t size, int* output);
void maxpool(const int *input,
const uint16_t dim_im_in_x, // input image dimension x or W
const uint16_t dim_im_in_y, // input image dimension y or H
const uint16_t ch_im_in, // number of input image channels
const uint16_t dim_kernel_x, // window kernel size
const uint16_t dim_kernel_y, // window kernel size
const uint16_t padding_x, // padding sizes
const uint16_t padding_y, // padding sizes
const uint16_t stride_x, // stride
const uint16_t stride_y, // stride
const uint16_t dim_im_out_x, // output image dimension x or W
const uint16_t dim_im_out_y, // output image dimension y or H
int *output);
void matmul(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
int *output);
void add(const int *input, // pointer to vector
const int *bias, // pointer to matrix
const uint16_t dim_vec, // length of the vector
int *output);
void dense(const int *input, // pointer to vector
const int *weight, // pointer to matrix
const uint16_t dim_vec, // length of the vector
const uint16_t num_of_rows, // numCol of A
const int *bias,
int *output);
void softmax(const int *input, const uint32_t dim_vec, int *output);
#endif // __ONNX_H__

View File

@@ -0,0 +1,27 @@
#include "onnx.h"
/* ReLU: output[i] = max(input[i], 0).  input and output may be distinct buffers. */
void relu(const int *input, uint32_t size, int* output)
{
    for (uint32_t i = 0; i < size; i++)
    {
        int v = input[i];
        output[i] = (v < 0) ? 0 : v;
    }
}
/*
 * ReLU layer: shape-preserving, needs no weights, so the graph/node is not
 * consulted (graph and layer_name are kept for the uniform layer API).
 * Returns a malloc'd buffer owned by the caller, or NULL on error.
 */
int* relu_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    int64_t len = shapeInput[0] * shapeInput[1] * shapeInput[2];
    int* output = (int*) malloc(sizeof(int)*len);
    if(output == NULL)
    {
        // No memory
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*len)), which cleared
    // only sizeof(size_t) bytes.
    memset(output, 0, sizeof(int)*len);
    relu(input, len, output);
    // BUG FIX: arguments were swapped; output shape = input shape.
    memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);
    return output;
}

View File

@@ -0,0 +1,57 @@
#include "onnx.h"
//#include<math.h>
/* Integer absolute value (abs_core(INT_MIN) overflows, as in the original). */
int abs_core(int x)
{
    if (x < 0)
    {
        return -x;
    }
    return x;
}
// Fixed-point exponential approximation: exp(x) ~= (1 + x/N)^N with N = 2^8,
// implemented as 8 successive squarings.
// NOTE(review): with int arithmetic the seed `1 + (x << 8)` overflows almost
// immediately during the squarings, and the (1 + x/N) form would call for a
// right shift, not a left shift — this looks broken.  It is currently unused
// (softmax() below does not call it); confirm intent before enabling.
int exp_core(int x)
{
x = 1 + (x << 8);
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
return x;
}
/*
 * Fixed-point "softmax" passthrough: each logit is scaled down by 2^16.
 * NOTE(review): no actual exponentiation/normalization is performed — the
 * original normalize code was disabled; raw shifted logits are emitted
 * (argmax is preserved, probabilities are not).
 */
void softmax(const int *input, const uint32_t dim_vec, int *output)
{
    for (uint32_t i = 0; i < dim_vec; i++)
    {
        output[i] = input[i] >> 16;
    }
}
/*
 * Softmax layer over dim 1 of a flattened [1, N, 1] activation.
 * Shape-preserving; graph/layer_name are unused (uniform layer API).
 * Returns a malloc'd buffer owned by the caller, or NULL on error.
 */
int* softmax_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
    int* output = (int*) malloc(sizeof(int)*shapeInput[1]);
    if(output == NULL)
    {
        // No memory
        return NULL;
    }
    // BUG FIX: was memset(output, 0, sizeof(sizeof(int)*shapeInput[1])),
    // which cleared only sizeof(size_t) bytes.
    memset(output, 0, sizeof(int)*shapeInput[1]);
    softmax(input, shapeInput[1], output);
    // BUG FIX: arguments were swapped; output shape = input shape.
    memcpy(shapeOutput, shapeInput, sizeof(int64_t)*3);
    return output;
}

View File

@@ -0,0 +1,26 @@
#include "tos_k.h"
#ifdef __CC_ARM
/* avoid the heap and heap-using library functions supplied by arm */
#pragma import(__use_no_heap)
#endif
// Route libc malloc to the TencentOS-tiny memory-heap allocator.
void *malloc(size_t n)
{
return tos_mmheap_alloc(n);
}
// Route libc realloc to the TencentOS-tiny memory-heap allocator.
void *realloc(void *rmem, size_t newsize)
{
return tos_mmheap_realloc(rmem, newsize);
}
// Route libc calloc to the TencentOS-tiny memory-heap allocator.
void *calloc(size_t nelem, size_t elsize)
{
return tos_mmheap_calloc(nelem, elsize);
}
// Route libc free to the TencentOS-tiny memory-heap allocator.
void free(void *rmem)
{
tos_mmheap_free(rmem);
}

View File

@@ -0,0 +1,97 @@
#include "onnx.h"
/*
 * General N-dimensional transpose: returns a malloc'd array B (caller
 * frees) such that B has shape shape[perm[0]], shape[perm[1]], ... and
 * B[permuted index] == A[index].  Returns NULL on allocation failure.
 */
int* transpose(const int* A, int64_t* shape, int64_t dim, int64_t* perm)
{
    // Total element count.
    int elem = 1;
    for(int i = 0; i < dim; i++)
    {
        elem = elem * shape[i];
    }
    // BUG FIX: scratch index arrays were malloc'd/freed once per element
    // inside the copy loop; they are now allocated once.  Also, the original
    // leaked B / shapeB / indexA on the allocation-failure paths.
    int* B = malloc(sizeof(int) * elem);
    int* shapeB = malloc(sizeof(int) * dim);
    int* indexA = malloc(sizeof(int) * dim);
    int* indexB = malloc(sizeof(int) * dim);
    if(B == NULL || shapeB == NULL || indexA == NULL || indexB == NULL)
    {
        free(B);
        free(shapeB);
        free(indexA);
        free(indexB);
        return NULL;
    }
    // Output shape is the permutation of the input shape.
    for(int i = 0; i < dim; i++)
    {
        shapeB[i] = shape[perm[i]];
    }
    for(int src = 0; src < elem; src++)
    {
        // Decompose the flat source offset into per-axis indices (row-major).
        int temp = src;
        for(int i = dim-1; i >= 0; i--)
        {
            indexA[i] = temp % shape[i];
            temp = temp / shape[i];
        }
        // Permute the indices for the destination.
        for(int i = 0; i < dim; i++)
        {
            indexB[i] = indexA[perm[i]];
        }
        // Recompose the flat destination offset (row-major in shapeB).
        int dst = 0;
        temp = 1;
        for(int i = dim - 1; i >= 0; i--)
        {
            dst = dst + indexB[i] * temp;
            temp = temp * shapeB[i];
        }
        B[dst] = A[src];
    }
    free(indexA);
    free(indexB);
    free(shapeB);
    return B;
}
/*
 * Transpose node for 3-D (batchless) activations.
 * NOTE(review): the ONNX "perm" attribute is 4-D with a leading batch axis;
 * entries 1..3 are shifted down by one to drop it.  This assumes
 * attribute[0] is "perm" and perm[0] == 0 — confirm against real models.
 */
int* transpose_layer(Onnx__GraphProto* graph, const int *input, int64_t* shapeInput, int64_t* shapeOutput, const char* layer_name)
{
Onnx__NodeProto* node = onnx_graph_get_node_by_name(graph, layer_name);
if(node == NULL)
{
return NULL;
}
int64_t perm_t[3];
int64_t* perm = node->attribute[0]->ints;
// Drop the batch axis: 4-D perm -> 3-D perm.
perm_t[0] = perm[1] - 1;
perm_t[1] = perm[2] - 1;
perm_t[2] = perm[3] - 1;
// NOTE(review): transpose() may return NULL (allocation failure); the
// result is returned to the caller unchecked.
int* output = transpose(input, shapeInput, 3, perm_t);
shapeOutput[0] = shapeInput[perm_t[0]];
shapeOutput[1] = shapeInput[perm_t[1]];
shapeOutput[2] = shapeInput[perm_t[2]];
return output;
}
}