21 #include "fastdeploy/core/allocate.h"
22 #include "fastdeploy/core/fd_scalar.h"
23 #include "fastdeploy/core/fd_type.h"

45   void SetData(const std::vector<int64_t>& tensor_shape,
                  const FDDataType& data_type, void* data_buffer,
                  bool copy = false,
                  const Device& data_device = Device::CPU,
                  int data_device_id = -1) {
46     SetExternalData(tensor_shape, data_type, data_buffer, data_device,
                       data_device_id);
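The SetData() overload above wraps an existing buffer by forwarding to SetExternalData() on line 46. A minimal sketch of the call, completing the truncated example from the doc comment further down; FDDataType::FP32 is assumed to be the 32-bit float enumerator from fd_type.h, and the buffer must outlive the tensor's use of it.

    #include <vector>
    #include "fastdeploy/core/fd_tensor.h"  // assumed include path for this header

    void WrapHostBuffer() {
      std::vector<float> buffer(1 * 3 * 224 * 224, 0.0f);
      fastdeploy::FDTensor tensor;
      // copy defaults to false, so the tensor only borrows buffer.data()
      // instead of taking ownership of the memory.
      tensor.SetData({1, 3, 224, 224}, fastdeploy::FDDataType::FP32, buffer.data());
    }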
62   void ExpandDim(int64_t axis = 0);

65   void Squeeze(int64_t axis = 0);

68   bool Reshape(const std::vector<int64_t>& new_shape);

77   std::vector<int64_t> Shape() const { return shape; }

80   FDDataType Dtype() const { return dtype; }
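A sketch of the shape accessors and manipulators above, assuming ExpandDim() inserts a length-1 axis at the given position and Squeeze() removes one, as the names and the default axis of 0 suggest.

    #include <cstdint>
    #include <vector>
    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void ShapeSketch() {
      std::vector<float> buffer(3 * 224 * 224, 0.0f);
      fastdeploy::FDTensor tensor;
      tensor.SetData({3, 224, 224}, fastdeploy::FDDataType::FP32, buffer.data());
      tensor.ExpandDim(0);             // assumed result: {1, 3, 224, 224}
      tensor.Squeeze(0);               // assumed result: {3, 224, 224}
      tensor.Reshape({3, 224 * 224});  // returns bool; element count is unchanged
      std::vector<int64_t> shape = tensor.Shape();    // query the current shape
      fastdeploy::FDDataType dtype = tensor.Dtype();  // query the element type
      (void)shape;
      (void)dtype;
    }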
90   void Allocate(const FDDataType& data_type,
                   const std::vector<int64_t>& data_shape) {
91     Allocate(data_shape, data_type, name);

95   void PrintInfo(const std::string& prefix = "Debug TensorInfo: ") const;
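The two-argument Allocate() forwards to the Allocate(shape, dtype, name) overload declared at line 166. A short sketch that allocates a CPU buffer and dumps the tensor metadata; FDDataType::FP32 is again an assumption about fd_type.h.

    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void AllocateSketch() {
      fastdeploy::FDTensor tensor;
      tensor.Allocate(fastdeploy::FDDataType::FP32, {1, 3, 224, 224});
      // PrintInfo() prints debug information about the tensor; the exact
      // fields (shape, dtype, device, ...) depend on the implementation.
      tensor.PrintInfo("allocated tensor: ");
    }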
98   std::string name = "";

101  bool IsShared() { return external_data_ptr != nullptr; }

109  void* buffer_ = nullptr;
110  std::vector<int64_t> shape = {0};
111  FDDataType dtype = FDDataType::INT8;

116  void* external_data_ptr = nullptr;

121  Device device = Device::CPU;

128  bool is_pinned_memory = false;

133  std::vector<int8_t> temporary_cpu_buffer;

138  size_t nbytes_allocated = 0;
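Since IsShared() on line 101 simply checks external_data_ptr, here is a sketch contrasting an owned buffer with a shared one; the expectation that Allocate() fills buffer_ rather than external_data_ptr is an assumption, not something the excerpt shows.

    #include <vector>
    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void OwnershipSketch() {
      fastdeploy::FDTensor owned;
      owned.Allocate(fastdeploy::FDDataType::FP32, {2, 2});
      bool owned_is_shared = owned.IsShared();    // expected: false (assumption)

      std::vector<float> buffer(4, 0.0f);
      fastdeploy::FDTensor shared;
      // SetData forwards to SetExternalData, which sets external_data_ptr,
      // so IsShared() should report true here.
      shared.SetData({2, 2}, fastdeploy::FDDataType::FP32, buffer.data());
      bool shared_is_shared = shared.IsShared();  // expected: true
      (void)owned_is_shared;
      (void)shared_is_shared;
    }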
145  const void* Data() const;

153  const void* CpuData() const;

159  void SetExternalData(const std::vector<int64_t>& new_shape,
160                       const FDDataType& data_type, void* data_buffer,
161                       const Device& new_device = Device::CPU,
162                       int new_device_id = -1);
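A sketch of reading a tensor back through the const accessors above. The comment on CpuData() is an assumption drawn from the temporary_cpu_buffer member: device memory is presumably staged on the host, while a CPU tensor should return the same storage as Data().

    #include <vector>
    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void ReadBackSketch() {
      std::vector<float> buffer(4, 1.5f);
      fastdeploy::FDTensor tensor;
      tensor.SetExternalData({2, 2}, fastdeploy::FDDataType::FP32, buffer.data());
      // Raw pointer to the tensor's memory on its current device.
      const float* raw = static_cast<const float*>(tensor.Data());
      // Assumed: host-visible view of the data, copied via temporary_cpu_buffer
      // when the tensor lives on a device other than the CPU.
      const float* host = static_cast<const float*>(tensor.CpuData());
      (void)raw;
      (void)host;
    }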
166  void Allocate(const std::vector<int64_t>& new_shape,
167                const FDDataType& data_type,
168                const std::string& tensor_name = "",
169                const Device& new_device = Device::CPU);

171  void Resize(size_t nbytes);

173  void Resize(const std::vector<int64_t>& new_shape);

175  void Resize(const std::vector<int64_t>& new_shape,
176              const FDDataType& data_type, const std::string& tensor_name = "",
177              const Device& new_device = Device::CPU);

179  bool ReallocFn(size_t nbytes);
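A sketch of the Resize() overloads above; the assumption that the shape-only overload keeps the current dtype and device and reallocates only when more bytes are needed is inferred from the nbytes_allocated member, not stated in the excerpt.

    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void ResizeSketch() {
      fastdeploy::FDTensor tensor;
      // Full overload: shape, dtype, debug name and target device in one call.
      tensor.Resize({1, 3, 224, 224}, fastdeploy::FDDataType::FP32,
                    "input_image", fastdeploy::Device::CPU);
      // Shape-only overload: assumed to keep dtype/device and grow the buffer
      // only if the new shape requires more than nbytes_allocated bytes.
      tensor.Resize({1, 3, 320, 320});
    }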
184  explicit FDTensor(const std::string& tensor_name);
185  explicit FDTensor(const char* tensor_name);

198  explicit FDTensor(const Scalar& scalar);
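A sketch of the explicit constructors above. The Scalar overload presumably produces a one-element tensor; Scalar itself comes from fd_scalar.h (included at line 22), and the float constructor used here is an assumption about that header.

    #include <string>
    #include "fastdeploy/core/fd_scalar.h"
    #include "fastdeploy/core/fd_tensor.h"  // assumed include paths

    void ConstructSketch() {
      fastdeploy::FDTensor named("input_0");                    // const char* overload
      fastdeploy::FDTensor also_named(std::string("input_1"));  // std::string overload
      // Assumed: a one-element tensor holding the scalar value 0.5f.
      fastdeploy::FDTensor from_scalar(fastdeploy::Scalar(0.5f));
    }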
202  static void CopyBuffer(void* dst, const void* src, size_t nbytes,
203                         const Device& device = Device::CPU,
204                         bool is_pinned_memory = false);
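CopyBuffer() is a static, device-aware copy helper. A minimal sketch of the CPU case, where it is assumed to behave like a plain memcpy of nbytes bytes.

    #include <vector>
    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void CopyBufferSketch() {
      std::vector<float> src(16, 3.0f);
      std::vector<float> dst(16, 0.0f);
      // With the default Device::CPU this is assumed to copy nbytes bytes
      // from src into dst, much like std::memcpy.
      fastdeploy::FDTensor::CopyBuffer(dst.data(), src.data(),
                                       src.size() * sizeof(float));
    }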
void SetData(const std::vector<int64_t>& tensor_shape, const FDDataType& data_type, void* data_buffer, bool copy = false, const Device& data_device = Device::CPU, int data_device_id = -1)
Set the data buffer for an FDTensor, e.g. std::vector<float> buffer(1 * 3 * 224 * 224, 0); FDTensor tensor;...
Definition: fd_tensor.h:45
FDDataType Dtype() const
Get the dtype of the tensor.
Definition: fd_tensor.h:80
FDTensor object used to represent a data matrix.
Definition: fd_tensor.h:31
void Allocate(const FDDataType& data_type, const std::vector<int64_t>& data_shape)
Allocate a CPU data buffer for an FDTensor, e.g. FDTensor tensor; tensor.Allocate(FDDataType::FLOAT, {1, 3, 224, 224});
Definition: fd_tensor.h:90
bool IsShared()
Whether the tensor owns its data buffer or shares a data buffer from outside.
Definition: fd_tensor.h:101
void* GetData()
Get the data pointer of the tensor.
Definition: fd_tensor.h:53
std::vector<int64_t> Shape() const
Get the shape of the tensor.
Definition: fd_tensor.h:77
const void* GetData() const
Get the data pointer of the tensor.
Definition: fd_tensor.h:57
All C++ FastDeploy APIs are defined inside this namespace.
Definition: option.h:16
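The GetData() entries above expose the same buffer without const, which is the natural way to fill an allocated tensor in place; a short sketch, again assuming FDDataType::FP32 from fd_type.h.

    #include "fastdeploy/core/fd_tensor.h"  // assumed include path

    void FillSketch() {
      fastdeploy::FDTensor tensor;
      tensor.Allocate(fastdeploy::FDDataType::FP32, {2, 3});
      // Non-const GetData() (fd_tensor.h:53) returns a writable pointer.
      float* data = static_cast<float*>(tensor.GetData());
      for (int i = 0; i < 2 * 3; ++i) {
        data[i] = static_cast<float>(i);
      }
    }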