model.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "fastdeploy/vision/faceid/contrib/insightface/base.h"

namespace fastdeploy {
namespace vision {
namespace faceid {

class FASTDEPLOY_DECL ArcFace : public InsightFaceRecognitionBase {
 public:
  ArcFace(const std::string& model_file, const std::string& params_file = "",
          const RuntimeOption& custom_option = RuntimeOption(),
          const ModelFormat& model_format = ModelFormat::ONNX)
      : InsightFaceRecognitionBase(model_file, params_file, custom_option,
                                   model_format) {
    if (model_format == ModelFormat::ONNX) {
      valid_cpu_backends = {Backend::ORT};
      valid_gpu_backends = {Backend::ORT, Backend::TRT};
    } else if (model_format == ModelFormat::RKNN) {
      valid_rknpu_backends = {Backend::RKNPU2};
    } else {
      valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
      valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
      valid_kunlunxin_backends = {Backend::LITE};
    }
    initialized = Initialize();
  }

  virtual std::string ModelName() const { return "ArcFace"; }
};

class FASTDEPLOY_DECL CosFace : public InsightFaceRecognitionBase {
 public:
  CosFace(const std::string& model_file, const std::string& params_file = "",
          const RuntimeOption& custom_option = RuntimeOption(),
          const ModelFormat& model_format = ModelFormat::ONNX)
      : InsightFaceRecognitionBase(model_file, params_file, custom_option,
                                   model_format) {
    if (model_format == ModelFormat::ONNX) {
      valid_cpu_backends = {Backend::ORT};
      valid_gpu_backends = {Backend::ORT, Backend::TRT};
    } else if (model_format == ModelFormat::RKNN) {
      valid_rknpu_backends = {Backend::RKNPU2};
    } else {
      valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
      valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
      valid_kunlunxin_backends = {Backend::LITE};
    }
    initialized = Initialize();
  }

  virtual std::string ModelName() const { return "CosFace"; }
};

class FASTDEPLOY_DECL PartialFC : public InsightFaceRecognitionBase {
 public:
  PartialFC(const std::string& model_file, const std::string& params_file = "",
            const RuntimeOption& custom_option = RuntimeOption(),
            const ModelFormat& model_format = ModelFormat::ONNX)
      : InsightFaceRecognitionBase(model_file, params_file, custom_option,
                                   model_format) {
    if (model_format == ModelFormat::ONNX) {
      valid_cpu_backends = {Backend::ORT};
      valid_gpu_backends = {Backend::ORT, Backend::TRT};
    } else if (model_format == ModelFormat::RKNN) {
      valid_rknpu_backends = {Backend::RKNPU2};
    } else {
      valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
      valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
      valid_kunlunxin_backends = {Backend::LITE};
    }
    initialized = Initialize();
  }

  virtual std::string ModelName() const { return "PartialFC"; }
};

class FASTDEPLOY_DECL VPL : public InsightFaceRecognitionBase {
 public:
  VPL(const std::string& model_file, const std::string& params_file = "",
      const RuntimeOption& custom_option = RuntimeOption(),
      const ModelFormat& model_format = ModelFormat::ONNX)
      : InsightFaceRecognitionBase(model_file, params_file, custom_option,
                                   model_format) {
    if (model_format == ModelFormat::ONNX) {
      valid_cpu_backends = {Backend::ORT};
      valid_gpu_backends = {Backend::ORT, Backend::TRT};
    } else if (model_format == ModelFormat::RKNN) {
      valid_rknpu_backends = {Backend::RKNPU2};
    } else {
      valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
      valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
      valid_kunlunxin_backends = {Backend::LITE};
    }
    initialized = Initialize();
  }

  virtual std::string ModelName() const { return "VPL"; }
};

}  // namespace faceid
}  // namespace vision
}  // namespace fastdeploy
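
For orientation, a minimal usage sketch follows. Only the constructor signatures are taken from this header; the umbrella include, Initialized(), RuntimeOption::UseGpu(), the Predict() call, and the FaceRecognitionResult type with its embedding field are assumptions based on the wider FastDeploy vision API, and the file names are hypothetical. Verify them against the installed FastDeploy version.

// Minimal sketch (assumptions noted above): run an ArcFace ONNX model on one
// aligned face crop and read back its embedding vector.
#include <iostream>

#include "fastdeploy/vision.h"    // assumed umbrella header for vision models
#include "opencv2/opencv.hpp"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseGpu();  // assumed helper; omit to stay on CPU

  // Constructor as declared above: ONNX model, so no separate params file.
  fastdeploy::vision::faceid::ArcFace model("arcface_r100.onnx", "", option,
                                            fastdeploy::ModelFormat::ONNX);
  if (!model.Initialized()) {  // assumed accessor on the model base class
    std::cerr << "Failed to initialize ArcFace." << std::endl;
    return -1;
  }

  cv::Mat face = cv::imread("face0.jpg");            // hypothetical input path
  fastdeploy::vision::FaceRecognitionResult result;  // assumed result type
  if (!model.Predict(face, &result)) {  // assumed signature; some versions take cv::Mat*
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  std::cout << "Embedding length: " << result.embedding.size() << std::endl;
  return 0;
}

CosFace, PartialFC, and VPL expose the same constructor, so the same sketch applies with only the class name changed.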
Referenced definitions:
- fastdeploy: all C++ FastDeploy APIs are defined inside this namespace (option.h:16).
- Backend::PDINFER: Paddle Inference, supports Paddle format models, CPU / NVIDIA GPU (enum_variables.h:35).
- Backend::TRT: TensorRT, supports Paddle/ONNX format models, NVIDIA GPU only (enum_variables.h:34).
- Backend::RKNPU2: RKNPU2, supports RKNN format models, Rockchip NPU only (enum_variables.h:39).
- Backend::LITE: Paddle Lite, supports Paddle format models, ARM CPU only (enum_variables.h:38).
- ModelFormat (enum_variables.h:67).
- ModelFormat::ONNX: model in ONNX format (enum_variables.h:70).
- ModelFormat::RKNN: model in RKNN format (enum_variables.h:71).