#include "fastdeploy/runtime/backends/backend.h"
#include "fastdeploy/runtime/backends/paddle/option.h"
#ifdef ENABLE_PADDLE2ONNX
#include "paddle2onnx/converter.h"
#endif
#include "fastdeploy/utils/unique_ptr.h"
#include "paddle_inference_api.h"

namespace fastdeploy {

// Convert a FastDeploy Device to the corresponding paddle_infer::PlaceType
paddle_infer::PlaceType ConvertFDDeviceToPlace(Device device);
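
// Share the memory buffer of an FDTensor with a paddle_infer::Tensor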
void ShareTensorFromFDTensor(paddle_infer::Tensor* tensor, FDTensor& fd_tensor);
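
// Share an FDTensor's buffer with a paddle_infer::Tensor used for an output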
void ShareOutTensorFromFDTensor(paddle_infer::Tensor* tensor,
                                FDTensor& fd_tensor);

// Convert a paddle_infer::Tensor to an FDTensor;
// if copy_to_fd is true, copy the data into the FDTensor,
// else share the memory with the FDTensor
void PaddleTensorToFDTensor(std::unique_ptr<paddle_infer::Tensor>& tensor,
                            FDTensor* fd_tensor, bool copy_to_fd);
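
// Convert paddle_infer::DataType to FDDataType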
FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype);
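
// Convert a reader data type code (int32_t) to FDDataType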
FDDataType ReaderDataTypeToFD(int32_t dtype);

class PaddleBackend : public BaseBackend {
 public:
  PaddleBackend() {}
  virtual ~PaddleBackend() = default;
  bool Init(const RuntimeOption& option);
  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
             bool copy_to_fd = true) override;

  int NumInputs() const override { return inputs_desc_.size(); }

  int NumOutputs() const override { return outputs_desc_.size(); }

  std::unique_ptr<BaseBackend> Clone(RuntimeOption& runtime_option,
                                     void* stream = nullptr,
                                     int device_id = -1) override;

  TensorInfo GetInputInfo(int index) override;
  TensorInfo GetOutputInfo(int index) override;
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;

 private:
  void BuildOption(const PaddleBackendOption& option);

  bool InitFromPaddle(const std::string& model, const std::string& params,
                      bool model_from_memory,
                      const PaddleBackendOption& option = PaddleBackendOption());

  void CollectShapeRun(paddle_infer::Predictor* predictor,
                       const std::map<std::string, std::vector<int>>& shape) const;
  void GetDynamicShapeFromOption(
      const PaddleBackendOption& option,
      std::map<std::string, std::vector<int>>* max_shape,
      std::map<std::string, std::vector<int>>* min_shape,
      std::map<std::string, std::vector<int>>* opt_shape) const;
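
  // Note: the min/opt/max shape maps filled by GetDynamicShapeFromOption above
  // are keyed by input tensor name, e.g. a hypothetical entry
  // {"x", {1, 3, 224, 224}} (illustrative values, not taken from the source).
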
  void SetTRTDynamicShapeToConfig(const PaddleBackendOption& option);

  PaddleBackendOption option_;
  paddle_infer::Config config_;
  std::shared_ptr<paddle_infer::Predictor> predictor_;
  std::vector<TensorInfo> inputs_desc_;
  std::vector<TensorInfo> outputs_desc_;
};

}  // namespace fastdeploy
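
// Minimal usage sketch (illustrative only): real code normally selects this
// backend through fastdeploy::Runtime / RuntimeOption rather than constructing
// it directly, and the model paths below are placeholders.
//
//   fastdeploy::RuntimeOption option;
//   option.UsePaddleInferBackend();
//   option.SetModelPath("model.pdmodel", "model.pdiparams");
//
//   fastdeploy::PaddleBackend backend;
//   if (backend.Init(option)) {
//     std::vector<fastdeploy::FDTensor> inputs, outputs;
//     // ... fill `inputs` with named FDTensors matching GetInputInfos() ...
//     backend.Infer(inputs, &outputs);
//   }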