FastDeploy  latest
Fast & Easy to Deploy!
runtime_option.h
Go to the documentation of this file.
1 // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #pragma once
16 
/* NOTE(review): the #include directives that occupied original lines 17-20
 * were lost during HTML-to-text extraction (they were rendered as links);
 * confirm the exact header paths against the upstream runtime_option.h. */
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 
/* NOTE(review): the declaration head lines below were lost during
 * HTML-to-text extraction (Doxygen rendered the function names as links).
 * They have been reconstructed from the cross-reference section at the
 * bottom of this page; verify against the upstream runtime_option.h. */

/** \brief Create a new FD_C_RuntimeOptionWrapper object.
 */
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_RuntimeOptionWrapper*
FD_C_CreateRuntimeOptionWrapper();

/** \brief Destroy a FD_C_RuntimeOptionWrapper object.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_DestroyRuntimeOptionWrapper(
    __fd_take FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set path of model file and parameter file.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetModelPath(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* model_path, const char* params_path,
    const FD_C_ModelFormat format);

/** \brief Specify the memory buffer of model and parameter. Used when model
 * and params are loaded directly from memory.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetModelBuffer(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* model_buffer, const char* params_buffer,
    const FD_C_ModelFormat);

/** \brief Use cpu to inference, the runtime will inference on CPU by default.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseCpu(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Use Nvidia GPU to inference.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseGpu(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int gpu_id);

/** \brief Use RKNPU2 to inference.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseRKNPU2(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    FD_C_rknpu2_CpuName rknpu2_name, FD_C_rknpu2_CoreMask rknpu2_core);

/** \brief Use TimVX to inference.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseTimVX(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Use Huawei Ascend to inference.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseAscend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Turn on KunlunXin XPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseKunlunXin(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int kunlunxin_id, int l3_workspace_size, FD_C_Bool locked,
    FD_C_Bool autotune, const char* autotune_file, const char* precision,
    FD_C_Bool adaptive_seqlen, FD_C_Bool enable_multi_stream);

FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseSophgo(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetExternalStream(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    void* external_stream);

/** \brief Set number of cpu threads while inference on CPU; by default it
 * will be decided by the different backends.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetCpuThreadNum(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int thread_num);

/** \brief Set ORT graph opt level; default is decided by ONNX Runtime itself.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int level);

/** \brief Set Paddle Inference as inference backend, support CPU/GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUsePaddleBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Wrapper function of UsePaddleBackend().
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperUsePaddleInferBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set ONNX Runtime as inference backend, support CPU/GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseOrtBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set SOPHGO Runtime as inference backend, support CPU/GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseSophgoBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set TensorRT as inference backend, only support GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseTrtBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set Poros backend as inference backend, support CPU/GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUsePorosBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set OpenVINO as inference backend, only support CPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseOpenVINOBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set Paddle Lite as inference backend, only support arm cpu.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseLiteBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Wrapper function of UseLiteBackend().
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set mkldnn switch while using Paddle Inference as inference backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    FD_C_Bool pd_mkldnn);

/** \brief If TensorRT backend is used, EnablePaddleToTrt will change to use
 * Paddle Inference backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperEnablePaddleToTrt(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Delete pass by name while using Paddle Inference as inference
 * backend; this can be called multiple times.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* delete_pass_name);

/** \brief Enable print debug information while using Paddle Inference as
 * inference backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable print debug information while using Paddle Inference as
 * inference backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set shape cache size while using Paddle Inference with mkldnn; by
 * default it will cache all the different shapes.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper, int size);

/** \brief Set device name for OpenVINO, default 'CPU'; can also be 'AUTO',
 * 'GPU', 'GPU.1', etc.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetOpenVINODevice(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* name);

/** \brief Set optimized model dir for Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* optimized_model_dir);

/** \brief Set subgraph partition path for Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* nnadapter_subgraph_partition_config_path);

/** \brief Set subgraph partition config buffer for Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* nnadapter_subgraph_partition_config_buffer);

/** \brief Set context properties for Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteContextProperties(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* nnadapter_context_properties);

/** \brief Set model cache dir for Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* nnadapter_model_cache_dir);

/** \brief Set mixed precision quantization config path for Paddle Lite
 * backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* nnadapter_mixed_precision_quantization_config_path);

/** \brief Enable half precision while using Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperEnableLiteFP16(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable half precision, change to full precision (float32).
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperDisableLiteFP16(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Enable int8 precision while using Paddle Lite backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperEnableLiteInt8(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable int8 precision, change to full precision (float32).
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperDisableLiteInt8(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set power mode while using Paddle Lite as inference backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetLitePowerMode(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    FD_C_LitePowerMode mode);

/** \brief Enable FP16 inference while using TensorRT backend. Notice: not all
 * the GPU devices support FP16.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperEnableTrtFP16(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable FP16 inference while using TensorRT backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperDisableTrtFP16(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set cache file path while using TensorRT backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetTrtCacheFile(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const char* cache_file_path);

/** \brief Enable pinned memory. Pinned memory can be utilized to speed up the
 * data transfer between CPU and GPU.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperEnablePinnedMemory(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable pinned memory.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperDisablePinnedMemory(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Enable to collect shape in paddle trt backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Disable to collect shape in paddle trt backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void
FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper);

/** \brief Set number of streams for the OpenVINO backend.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperSetOpenVINOStreams(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int num_streams);

/** \brief Use Graphcore IPU to inference.
 */
FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseIpu(
    __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    int device_num, int micro_batch_size, FD_C_Bool enable_pipelining,
    int batches_per_step);
499 #ifdef __cplusplus
500 } // extern "C"
501 #endif
int8_t FD_C_Bool
Definition: fd_common.h:58
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Disable print debug information while using Paddle Inference as inference backend.
Definition: runtime_option.cc:231
#define FASTDEPLOY_CAPI_EXPORT
Definition: fd_common.h:27
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *nnadapter_subgraph_partition_config_path)
Set subgraph partition path for Paddle Lite backend.
Definition: runtime_option.cc:262
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Enable print debug information while using Paddle Inference as inference backend, the backend disable...
Definition: runtime_option.cc:224
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteContextProperties(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *nnadapter_context_properties)
Set context properties for Paddle Lite backend.
Definition: runtime_option.cc:280
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Enable to collect shape in paddle trt backend.
Definition: runtime_option.cc:377
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLitePowerMode(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, FD_C_LitePowerMode mode)
Set power mode while using Paddle Lite as inference backend, mode(0: LITE_POWER_HIGH; 1: LITE_POWER_L...
Definition: runtime_option.cc:332
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Disable to collect shape in paddle trt backend.
Definition: runtime_option.cc:384
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisableLiteFP16(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
disable half precision, change to full precision(float32)
Definition: runtime_option.cc:311
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseAscend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Use Huawei Ascend to inference.
Definition: runtime_option.cc:91
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *delete_pass_name)
Delete pass by name while using Paddle Inference as inference backend, this can be called multiple ti...
Definition: runtime_option.cc:216
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseSophgo(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Definition: runtime_option.cc:111
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUsePorosBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set Poros backend as inference backend, support CPU/GPU.
Definition: runtime_option.cc:175
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int level)
Set ORT graph opt level, default is decide by ONNX Runtime itself.
Definition: runtime_option.cc:134
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int size)
Set shape cache size while using Paddle Inference with mkldnn, by default it will cache all the diffe...
Definition: runtime_option.cc:238
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetTrtCacheFile(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *cache_file_path)
Set cache file path while using TensorRT backend. Loading a Paddle/ONNX model and initializing TensorRT ...
Definition: runtime_option.cc:355
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Wrapper function of UseLiteBackend()
Definition: runtime_option.cc:196
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetOpenVINOStreams(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int num_streams)
Set number of streams by the OpenVINO backends.
Definition: runtime_option.cc:391
Definition: types_internal.h:26
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, FD_C_Bool pd_mkldnn)
Set mkldnn switch while using Paddle Inference as inference backend.
Definition: runtime_option.cc:201
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseRKNPU2(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, FD_C_rknpu2_CpuName rknpu2_name, FD_C_rknpu2_CoreMask rknpu2_core)
Use RKNPU2 to inference.
Definition: runtime_option.cc:74
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetOpenVINODevice(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *name)
Set device name for OpenVINO, default 'CPU', can also be 'AUTO', 'GPU', 'GPU.1'....
Definition: runtime_option.cc:246
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseGpu(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int gpu_id)
Use Nvidia GPU to inference.
Definition: runtime_option.cc:66
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseTrtBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set TensorRT as inference backend, only support GPU.
Definition: runtime_option.cc:168
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisablePinnedMemory(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Disable pinned memory.
Definition: runtime_option.cc:370
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisableTrtFP16(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Disable FP16 inference while using TensorRT backend.
Definition: runtime_option.cc:348
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnableLiteFP16(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
enable half precision while use paddle lite backend
Definition: runtime_option.cc:304
FASTDEPLOY_CAPI_EXPORT __fd_give FD_C_RuntimeOptionWrapper * FD_C_CreateRuntimeOptionWrapper()
Create a new FD_C_RuntimeOptionWrapper object.
Definition: runtime_option.cc:24
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *nnadapter_mixed_precision_quantization_config_path)
Set mixed precision quantization config path for Paddle Lite backend.
Definition: runtime_option.cc:297
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseCpu(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Use cpu to inference, the runtime will inference on CPU by default.
Definition: runtime_option.cc:59
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetCpuThreadNum(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int thread_num)
Set number of cpu threads while inference on CPU, by default it will decided by the different backend...
Definition: runtime_option.cc:126
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseOrtBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set ONNX Runtime as inference backend, support CPU/GPU.
Definition: runtime_option.cc:154
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUsePaddleBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set Paddle Inference as inference backend, support CPU/GPU.
Definition: runtime_option.cc:142
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetModelPath(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *model_path, const char *params_path, const FD_C_ModelFormat format)
Set path of model file and parameter file.
Definition: runtime_option.cc:38
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetExternalStream(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, void *external_stream)
Definition: runtime_option.cc:118
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *nnadapter_subgraph_partition_config_buffer)
Set subgraph partition path for Paddle Lite backend.
Definition: runtime_option.cc:271
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseTimVX(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Use TimVX to inference.
Definition: runtime_option.cc:84
#define __fd_keep
Definition: fd_common.h:55
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseLiteBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set Paddle Lite as inference backend, only support arm cpu.
Definition: runtime_option.cc:189
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnableTrtFP16(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Enable FP16 inference while using TensorRT backend. Notice: not all the GPU device support FP16...
Definition: runtime_option.cc:341
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperDisableLiteInt8(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
disable int8 precision, change to full precision(float32)
Definition: runtime_option.cc:325
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *optimized_model_dir)
Set optimized model dir for Paddle Lite backend.
Definition: runtime_option.cc:254
#define __fd_take
Definition: fd_common.h:46
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseKunlunXin(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int kunlunxin_id, int l3_workspace_size, FD_C_Bool locked, FD_C_Bool autotune, const char *autotune_file, const char *precision, FD_C_Bool adaptive_seqlen, FD_C_Bool enable_multi_stream)
Turn on KunlunXin XPU.
Definition: runtime_option.cc:98
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnablePinnedMemory(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Enable pinned memory. Pinned memory can be utilized to speedup the data transfer between CPU and GPU...
Definition: runtime_option.cc:363
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetModelBuffer(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *model_buffer, const char *params_buffer, const FD_C_ModelFormat)
Specify the memory buffer of model and parameter. Used when model and params are loaded directly from...
Definition: runtime_option.cc:49
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUsePaddleInferBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Wrapper function of UsePaddleBackend()
Definition: runtime_option.cc:149
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, const char *nnadapter_model_cache_dir)
Set model cache dir for Paddle Lite backend.
Definition: runtime_option.cc:289
#define __fd_give
Definition: fd_common.h:37
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseOpenVINOBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set OpenVINO as inference backend, only support CPU.
Definition: runtime_option.cc:182
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseIpu(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper, int device_num, int micro_batch_size, FD_C_Bool enable_pipelining, int batches_per_step)
Graphcore IPU to inference.
Definition: runtime_option.cc:399
FASTDEPLOY_CAPI_EXPORT void FD_C_DestroyRuntimeOptionWrapper(__fd_take FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Destroy a FD_C_RuntimeOptionWrapper object.
Definition: runtime_option.cc:33
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnableLiteInt8(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
enable int8 precision while use paddle lite backend
Definition: runtime_option.cc:318
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperEnablePaddleToTrt(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
If TensorRT backend is used, EnablePaddleToTrt will change to use Paddle Inference backend...
Definition: runtime_option.cc:209
FASTDEPLOY_CAPI_EXPORT void FD_C_RuntimeOptionWrapperUseSophgoBackend(__fd_keep FD_C_RuntimeOptionWrapper *fd_c_runtime_option_wrapper)
Set SOPHGO Runtime as inference backend, support CPU/GPU.
Definition: runtime_option.cc:161