22 #include "fastdeploy/runtime/backends/backend.h" 23 #include "fastdeploy/utils/unique_ptr.h" 24 #include "fastdeploy/runtime/backends/openvino/option.h" 25 #include "openvino/openvino.hpp" 29 class OpenVINOBackend :
public BaseBackend {
31 static ov::Core core_;
33 virtual ~OpenVINOBackend() =
default;
35 bool Init(
const RuntimeOption& option);
37 bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
38 bool copy_to_fd =
true)
override;
40 int NumInputs()
const override;
42 int NumOutputs()
const override;
44 TensorInfo GetInputInfo(
int index)
override;
45 TensorInfo GetOutputInfo(
int index)
override;
46 std::vector<TensorInfo> GetInputInfos()
override;
47 std::vector<TensorInfo> GetOutputInfos()
override;
49 std::unique_ptr<BaseBackend> Clone(RuntimeOption &runtime_option,
50 void* stream =
nullptr,
51 int device_id = -1)
override;
55 InitFromPaddle(
const std::string& model_file,
const std::string& params_file,
56 const OpenVINOBackendOption& option = OpenVINOBackendOption());
59 InitFromOnnx(
const std::string& model_file,
60 const OpenVINOBackendOption& option = OpenVINOBackendOption());
63 void InitTensorInfo(
const std::vector<ov::Output<ov::Node>>& ov_outputs,
64 std::map<std::string, TensorInfo>* tensor_infos);
66 ov::CompiledModel compiled_model_;
67 ov::InferRequest request_;
68 OpenVINOBackendOption option_;
69 std::vector<TensorInfo> input_infos_;
70 std::vector<TensorInfo> output_infos_;
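For orientation, a minimal usage sketch follows. End users normally reach this backend through the public fastdeploy::Runtime API, where RuntimeOption::UseOpenVINOBackend() routes inference to the OpenVINOBackend::Init() and Infer() methods declared above, rather than constructing the class directly. The model path, ONNX format, and the 1x3x224x224 FP32 input are placeholder assumptions for illustration, not part of this header.

#include <vector>

#include "fastdeploy/runtime.h"

int main() {
  // Select the OpenVINO backend; "model.onnx" is a placeholder model path.
  fastdeploy::RuntimeOption option;
  option.SetModelPath("model.onnx", "", fastdeploy::ModelFormat::ONNX);
  option.UseOpenVINOBackend();

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) return -1;

  // Build one FP32 input tensor; the 1x3x224x224 shape is an assumed example.
  fastdeploy::TensorInfo info = runtime.GetInputInfo(0);
  std::vector<float> data(1 * 3 * 224 * 224, 0.0f);
  std::vector<fastdeploy::FDTensor> inputs(1);
  inputs[0].SetExternalData({1, 3, 224, 224}, fastdeploy::FDDataType::FP32,
                            data.data());
  inputs[0].name = info.name;

  // Infer() fills the output FDTensors from the compiled OpenVINO model.
  std::vector<fastdeploy::FDTensor> outputs;
  if (!runtime.Infer(inputs, &outputs)) return -1;
  return 0;
}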