trt_backend.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cuda_runtime_api.h>

#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "fastdeploy/runtime/backends/backend.h"
#include "fastdeploy/runtime/backends/tensorrt/utils.h"
#include "fastdeploy/runtime/backends/tensorrt/option.h"
#include "fastdeploy/utils/unique_ptr.h"

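// INT8 entropy calibrator that only replays a pre-generated calibration
// cache: it produces no calibration batches (getBatch() always returns
// false) and does not write a new cache.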
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 {
 public:
  explicit Int8EntropyCalibrator2(const std::string& calibration_cache)
      : calibration_cache_(calibration_cache) {}

  int getBatchSize() const noexcept override { return 0; }

  bool getBatch(void* bindings[], const char* names[],
                int nbBindings) noexcept override {
    return false;
  }

  const void* readCalibrationCache(size_t& length) noexcept override {
    length = calibration_cache_.size();
    return length ? calibration_cache_.data() : nullptr;
  }

  void writeCalibrationCache(const void* cache,
                             size_t length) noexcept override {
    fastdeploy::FDERROR << "NOT IMPLEMENTED." << std::endl;
  }

 private:
  const std::string calibration_cache_;
};

namespace fastdeploy {

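// Describes a single input or output tensor of the TensorRT engine.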
struct TrtValueInfo {
  std::string name;
  std::vector<int> shape;
  nvinfer1::DataType dtype;   // dtype of the TRT model
  FDDataType original_dtype;  // dtype of the original ONNX/Paddle model
};

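// Helpers for converting TensorRT dims/dtypes and querying dtype sizes.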
std::vector<int> toVec(const nvinfer1::Dims& dim);
size_t TrtDataTypeSize(const nvinfer1::DataType& dtype);
FDDataType GetFDDataType(const nvinfer1::DataType& dtype);

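// TensorRT inference backend: builds a TensorRT engine from an ONNX/Paddle
// model (or loads a cached serialized engine) and runs inference on GPU.
//
// Minimal usage sketch, assuming the backend is driven directly; in practice
// it is typically used through FastDeploy's higher-level Runtime, and the
// RuntimeOption configuration below is elided and illustrative only:
//
//   fastdeploy::RuntimeOption runtime_option;
//   // ... configure model buffers/paths and TensorRT options ...
//
//   fastdeploy::TrtBackend backend;
//   if (backend.Init(runtime_option)) {
//     std::vector<fastdeploy::FDTensor> inputs(backend.NumInputs());
//     // ... fill each input according to backend.GetInputInfo(i) ...
//     std::vector<fastdeploy::FDTensor> outputs;
//     backend.Infer(inputs, &outputs);
//   }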
class TrtBackend : public BaseBackend {
 public:
  TrtBackend() : engine_(nullptr), context_(nullptr) {}

  bool Init(const RuntimeOption& runtime_option);
  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
             bool copy_to_fd = true) override;

  int NumInputs() const { return inputs_desc_.size(); }
  int NumOutputs() const { return outputs_desc_.size(); }
  TensorInfo GetInputInfo(int index);
  TensorInfo GetOutputInfo(int index);
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;
  std::unique_ptr<BaseBackend> Clone(RuntimeOption& runtime_option,
                                     void* stream = nullptr,
                                     int device_id = -1) override;

  ~TrtBackend() {
    if (parser_) {
      parser_.reset();
    }
  }

 private:
  void BuildOption(const TrtBackendOption& option);

  bool InitFromPaddle(const std::string& model_buffer,
                      const std::string& params_buffer,
                      const TrtBackendOption& option = TrtBackendOption(),
                      bool verbose = false);
  bool InitFromOnnx(const std::string& model_buffer,
                    const TrtBackendOption& option = TrtBackendOption());

  TrtBackendOption option_;
  std::shared_ptr<nvinfer1::ICudaEngine> engine_;
  std::shared_ptr<nvinfer1::IExecutionContext> context_;
  FDUniquePtr<nvonnxparser::IParser> parser_;
  FDUniquePtr<nvinfer1::IBuilder> builder_;
  FDUniquePtr<nvinfer1::INetworkDefinition> network_;
  cudaStream_t stream_{};
  std::vector<void*> bindings_;
  std::vector<TrtValueInfo> inputs_desc_;
  std::vector<TrtValueInfo> outputs_desc_;
  std::map<std::string, FDDeviceBuffer> inputs_device_buffer_;
  std::map<std::string, FDDeviceBuffer> outputs_device_buffer_;
  std::map<std::string, int> io_name_index_;

  std::string calibration_str_;
  bool save_external_ = false;
  std::string model_file_name_ = "";

  // When a model has more than one output, the output order of the
  // TensorRT engine may differ from that of the original ONNX model.
  // This map records the original output order so the right order can
  // be recovered.
  std::map<std::string, int> outputs_order_;

  // Temporarily stores the ONNX model content; it is released once
  // the TensorRT engine has been built from it.
  std::string onnx_model_buffer_;
  // Stores shape information of the loaded model.
  // For dynamic shapes it records their range information,
  // which is also updated while inferencing.
  std::map<std::string, ShapeRangeInfo> shape_range_info_;

  // If the final output tensor's dtype differs from the model output
  // tensor's dtype, the data needs to be cast to the final output's dtype,
  // e.g. when the TRT model output tensor is int32 but the final tensor
  // is int64. This map stores the casted tensors.
  std::map<std::string, FDTensor> casted_output_tensors_;

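  // Internal helpers for engine creation, engine-cache loading,
  // shape-range bookkeeping, and input/output buffer management.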
  void GetInputOutputInfo();
  bool CreateTrtEngineFromOnnx(const std::string& onnx_model_buffer);
  bool BuildTrtEngine();
  bool LoadTrtCache(const std::string& trt_engine_file);
  int ShapeRangeInfoUpdated(const std::vector<FDTensor>& inputs);
  void SetInputs(const std::vector<FDTensor>& inputs);
  void AllocateOutputsBuffer(std::vector<FDTensor>* outputs,
                             bool copy_to_fd = true);
};

}  // namespace fastdeploy