// Note: an earlier version of this code was written for the same task.
//OnnxClassificationInference.h
#pragma once
#define _CRT_SECURE_NO_WARNINGS

#include <cstdint>
#include <string>
#include <vector>

#include <opencv2/core/core.hpp>
#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
class OnnxClassificationInference
{
public:
OnnxClassificationInference(const std::string& model_filename, const cv::Size& input_size);
std::vector<float> inference(cv::Mat cv_image);
private:
std::string model_filename_;
cv::Size input_size_;
std::vector<float> input_image_;
Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "MyProject");
Ort::Session* ort_session_ = nullptr;
Ort::SessionOptions session_options_ = Ort::SessionOptions();
std::vector<char*> input_names_;
std::vector<char*> output_names_;
int output_elem_count_{ 0 };
std::vector<std::vector<int64_t>> input_node_dims_;
std::vector<std::vector<int64_t>> output_node_dims_;
int64_t output_size_;
void mat2Array(const cv::Mat& srcimg, std::vector<float>& input_image);
void setInputInfo();
void setOutputInfo();
};
//OnnxClassificationInference.cpp
#include "OnnxClassificationInference.h"
#include <opencv2/imgproc/imgproc.hpp>
#include <EcvString.h>
#include <EcvString.h>
#include <iostream>
using namespace cv;
using namespace std;
using namespace Ort;
// Builds the ORT session from the given model file and caches the model's
// input/output metadata for subsequent inference() calls.
OnnxClassificationInference::OnnxClassificationInference(const std::string& model_filename, const cv::Size& input_size):
model_filename_{ model_filename },
input_size_(input_size)
{
    session_options_.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
    // ORT on Windows takes a wide-character path; ecv::s2ws performs the conversion.
    const auto wide_model_path = ecv::s2ws(model_filename);
    ort_session_ = new Session(env, wide_model_path.c_str(), session_options_);
    setInputInfo();
    setOutputInfo();
}
// Runs the model on cv_image and returns the raw scores of the first output
// tensor. The image is resized/normalized into input_image_ by mat2Array.
std::vector<float> OnnxClassificationInference::inference(cv::Mat cv_image)
{
    this->mat2Array(cv_image, input_image_);
    // NCHW, batch of 1; assumes a 3-channel input (matches mat2Array's layout).
    array<int64_t, 4> input_shape{ 1, 3, input_size_.height, input_size_.width };
    auto allocator_info = MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Value input_tensor_ = Value::CreateTensor<float>(allocator_info, input_image_.data(), input_image_.size(), input_shape.data(), input_shape.size());
    auto ort_outputs = ort_session_->Run(RunOptions{ nullptr }, input_names_.data(), &input_tensor_, 1, output_names_.data(), output_names_.size());
    auto output_data = ort_outputs[0].GetTensorMutableData<float>();
    // Size the result from the tensor actually produced, not the precomputed
    // output_size_: that value multiplies the dims of *all* outputs together,
    // which over-reads ort_outputs[0] on multi-output models.
    const size_t elem_count = ort_outputs[0].GetTensorTypeAndShapeInfo().GetElementCount();
    return std::vector<float>(output_data, output_data + elem_count);
}
// Converts src_img (8-bit, channel-interleaved; BGR when 3-channel) into a
// planar CHW float buffer in input_image: resized to input_size_, channel
// order reversed (BGR -> RGB), and pixel values scaled to [0, 1].
void OnnxClassificationInference::mat2Array(const cv::Mat& src_img, std::vector<float>& input_image)
{
    Mat dst_img;
    // Pass the interpolation flag in its proper slot; the original passed
    // INTER_LINEAR as the fx scale factor (harmless only because dsize is set,
    // which makes cv::resize ignore fx/fy and default to INTER_LINEAR anyway).
    resize(src_img, dst_img, input_size_, 0.0, 0.0, INTER_LINEAR);
    const int rows = dst_img.rows;
    const int cols = dst_img.cols;
    const int channels = dst_img.channels();
    // Write into the out-parameter: the original ignored it and always wrote
    // the member, which this class's only caller happens to pass anyway.
    input_image.resize(static_cast<size_t>(rows) * cols * channels);
    for (int c = 0; c < channels; c++)
    {
        for (int i = 0; i < rows; i++)
        {
            const uchar* row_ptr = dst_img.ptr<uchar>(i);
            for (int j = 0; j < cols; j++)
            {
                // (channels - 1 - c) reverses the channel order, flipping
                // OpenCV's BGR into the model's RGB for 3-channel input.
                const float pix = row_ptr[j * channels + (channels - 1 - c)];
                input_image[(static_cast<size_t>(c) * rows + i) * cols + j] = pix / 255.0f;
            }
        }
    }
}
// Caches the model's input names and shapes.
// NOTE(review): GetInputName returns allocator-owned C strings; they are kept
// for the session's lifetime and never freed here, matching the original code.
void OnnxClassificationInference::setInputInfo()
{
    // size_t loop index: GetInputCount() is unsigned, so the original
    // `int i < size_t` comparison mixed signedness.
    const size_t num_input_nodes = ort_session_->GetInputCount();
    AllocatorWithDefaultOptions allocator;
    input_names_.reserve(num_input_nodes);
    input_node_dims_.reserve(num_input_nodes);
    for (size_t i = 0; i < num_input_nodes; i++)
    {
        input_names_.push_back(ort_session_->GetInputName(i, allocator));
        Ort::TypeInfo input_type_info = ort_session_->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        input_node_dims_.push_back(input_tensor_info.GetShape());
    }
}
// Caches output names/shapes and computes output_size_: the element count of
// the FIRST output tensor, with dynamic dims (reported as -1 or 0) counted
// as 1. The original multiplied the dims of *every* output into one product,
// which overstates the size on multi-output models; for the single-output
// case the result is identical.
void OnnxClassificationInference::setOutputInfo()
{
    const size_t num_output_nodes = ort_session_->GetOutputCount();
    output_size_ = 1;
    AllocatorWithDefaultOptions allocator;
    for (size_t i = 0; i < num_output_nodes; i++)
    {
        output_names_.push_back(ort_session_->GetOutputName(i, allocator));
        Ort::TypeInfo output_type_info = ort_session_->GetOutputTypeInfo(i);
        auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
        auto output_dims = output_tensor_info.GetShape();
        if (i == 0)
        {
            for (const auto dim : output_dims)
                output_size_ *= (dim > 0) ? dim : 1; // dynamic dim -> 1
        }
        output_node_dims_.push_back(output_dims);
    }
}