// NOTE(review): this chunk is an extraction of a larger header (DLLITE_BOLT_H).
// The enumerator lists and the enclosing struct definitions were lost in the
// extraction, and the leading integers on each line are residue of the original
// file's line numbers, not code. Code is left byte-identical below.

// Core-affinity choices for scheduling inference (enumerators elided here).
29 enum class AffinityType {
// Element/data types of tensors (enumerators elided here).
36 enum class TensorType {
// Memory layouts of tensors — presumably NCHW/NHWC-style orderings, per the
// "data order" notes later in this file (enumerators elided here).
44 enum class TensorLayout {
// Fields of an I/O tensor descriptor (enclosing struct header not visible):
// dimension sizes of the tensor — assumed outermost-first; TODO confirm.
57 std::vector<size_t> shape;
// raw data pointer plus a size — presumably in bytes; TODO confirm the unit.
58 std::pair<void *, size_t> buffer;
// Fields of a model-creation descriptor (enclosing struct header not visible):
63 AffinityType affinity;
// opaque blobs (pointer + size) — presumably serialized model data and an
// algorithm-map; NOTE(review): verify against the implementation.
64 std::pair<void *, size_t> modelStream;
65 std::pair<void *, size_t> algoStream;
// filesystem path to the model, used when no in-memory stream is supplied —
// TODO confirm precedence between modelPath and modelStream.
66 std::string modelPath;
// Status codes returned by the API functions declared below
// (enumerators elided here).
71 enum class ReturnStatus {
// Public API declarations. The leading integers are extraction residue of the
// original header's line numbers; code is left byte-identical.

/// Queries the model's expected input and output tensor descriptors,
/// filling `inputs` and `outputs` in place.
79 ReturnStatus GetIOFormats(
80 ModelHandle modelHandle, std::vector<IOTensor> &inputs, std::vector<IOTensor> &outputs);
/// Completes preparation of the inference engine for the model, given the
/// concrete input descriptors.
82 ReturnStatus
PrepareModel(ModelHandle modelHandle,
const std::vector<IOTensor> &inputs);
/// Retrieves the model's current input tensor descriptors into `inputs`.
84 ReturnStatus GetInputTensors(ModelHandle modelHandle, std::vector<IOTensor> &inputs);
/// Re-sizes the model's inputs to the shapes described by `inputs` —
/// presumably for dynamic-shape models; TODO confirm.
86 ReturnStatus ResizeInput(ModelHandle modelHandle,
const std::vector<IOTensor> &inputs);
/// Allocates a result object sized for the given output descriptors.
/// The handle must later be released with FreeResult (declared below).
88 ResultHandle AllocResult(ModelHandle modelHandle,
const std::vector<IOTensor> &outputs);
/// NOTE(review): the head of this declaration (presumably
/// "ReturnStatus RunModel(") was lost in extraction; only the parameter
/// tail survives. It appears to run inference on `inputs`, writing into
/// the pre-allocated `resultHandle` — confirm against the full header.
91 ModelHandle modelHandle, ResultHandle resultHandle,
const std::vector<IOTensor> &inputs);
/// Retrieves the output tensors produced into `resultHandle`.
93 ReturnStatus GetOutputTensors(ResultHandle resultHandle, std::vector<IOTensor> &outputs);
/// Releases a result object obtained from AllocResult.
95 ReturnStatus FreeResult(ResultHandle resultHandle);
101 #endif // DLLITE_BOLT_H

batch->channel/8->height->width->channel four-element data order
void PrepareModel(ModelHandle ih, const int num_inputs, const char **name, const int *n, const int *c, const int *h, const int *w, const DATA_TYPE *dt, const DATA_FORMAT *df)
Completes preparation of the model inference engine.
Power saving is the high priority (uses the small cores).
ModelHandle CreateModel(const char *modelPath, AFFINITY_TYPE affinity, const char *algorithmMapPath)
Creates a model from a model file.
Performance is the high priority (uses the big cores).
void DestroyModel(ModelHandle ih)
Destroys the model.
batch->channel->height->width data order
void RunModel(ModelHandle ih, ResultHandle ir, int num_inputs, const char **name, void **data)
Runs inference and produces results from the inputs.
batch->height->width->channel data order