17 #include "fastdeploy/core/fd_tensor.h" 18 #include "fastdeploy/utils/utils.h" 24 #ifndef NON_64_PLATFORM 25 #include "onnxruntime_cxx_api.h" 28 #include "fastdeploy/runtime/backends/common/cuda/adaptive_pool2d_kernel.h" 32 struct AdaptivePool2dKernel {
34 std::string pooling_type_ =
"avg";
35 std::vector<int64_t> output_size_ = {};
36 Ort::CustomOpApi ort_;
37 void* compute_stream_;
38 const char* provider_;
41 AdaptivePool2dKernel(Ort::CustomOpApi ort,
const OrtKernelInfo* info,
48 void GetAttribute(
const OrtKernelInfo* info);
50 void Compute(OrtKernelContext* context);
52 void CpuAdaptivePool(
const std::vector<int64_t>& input_size,
53 const std::vector<int64_t>& output_size,
54 const float* input_data,
float* output_data);
57 struct AdaptivePool2dOp
58 : Ort::CustomOpBase<AdaptivePool2dOp, AdaptivePool2dKernel> {
59 explicit AdaptivePool2dOp(
const char* provider) : provider_(provider) {}
60 void* CreateKernel(Ort::CustomOpApi api,
const OrtKernelInfo* info)
const {
61 return new AdaptivePool2dKernel(api, info, provider_);
64 const char* GetName()
const {
return "AdaptivePool2d"; }
66 size_t GetInputTypeCount()
const {
return 1; }
68 ONNXTensorElementDataType GetInputType(
size_t index)
const {
69 return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
72 size_t GetOutputTypeCount()
const {
return 1; }
74 ONNXTensorElementDataType GetOutputType(
size_t index)
const {
75 return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
78 const char* GetExecutionProviderType()
const {
return provider_; }
81 const char* provider_;
All C++ FastDeploy APIs are defined inside the `fastdeploy` namespace (namespace definition: option.h, line 16).